diff --git a/docs/monitor-templates.md b/docs/monitor-templates.md new file mode 100644 index 0000000..8061bb9 --- /dev/null +++ b/docs/monitor-templates.md @@ -0,0 +1,46 @@ +# Monitor templates + +Monitor templates are used for keeping track of state changes within the printout of the `eba` CFG. + +## Overview + +All monitor templates must implement the following signature: + +```ocaml +module type PrinterSpec = sig + (** The internal state of the printing monitor **) + type state + (** The transition function for the monitor. State changes happen here based on the previous state and the effects found in the input *) + val transition: state -> e list -> state + (** An indication of whether the printing monitor should be shown in the printout **) + val is_in_interesting_section: state -> bool + (** The initial state of the monitor **) + val initial_state: state + (* An indication of whether the monitor accepts an effect found in a CFG step *) + val is_in_transition_labels: e -> bool + (* An indication of whether the monitor is in its final state *) + val is_in_final_state: state -> bool + (* A string representation of the state of the monitor, displayed in the printout *) + val string_of_state: state -> string +end +``` + +For an example of a full implementation of this signature, see [checkAutomataDoubleUnlock.ml](../src/checkAutomataDoubleUnlock.ml) + +The definition of when and how state changes should happen is implemented in the `transition` function. This can efficiently be implemented as a pattern match on what the previous state was, and what the incoming input is. + +# The Printer [CFGPrinter](../src/cfgPrinter.ml) + +Monitor template definitions are passed to the [CFGPrinter.ml](../src/cfgPrinter.ml). This printer takes an implementation of a monitor template, and the CFG `step`. This step is passed to the CFGPrinter by the outer `eba` analysis logic. + +The CFGPrinter will explore the `eba` CFG and apply any effects found within a `step` in the CFG, provided that these effects are in the `is_in_transition_labels` of the monitor template implementation. The resulting state of applying the transition function of the monitor with the effects found in the `step` is preserved in the CFGPrinter as a mapping from the region a monitor template operates on to the current state of that monitor. + +Whenever effects involving a previously untracked region are encountered, a new instance of the monitor template is instantiated in its initial state, and the transition function is then applied with these effects. + +After the application of the transition function of the monitor template and the new state has been found, the CFGPrinter will filter the monitor template results to remove monitors in their final states. + +Finally, if a monitor reports that `it is_in_interesting_section`, the state of the monitor will be added to the printout. + +# Adding a new printing monitor template implementation to `eba` + +Creating a new printing monitor template implementation _should_ be the only thing required in order to add more information to the CFG printout, but modifications or additions might be needed in [CFGPrinter.ml](../src/cfgPrinter.ml). This depends on the use case and complexity of the printout. 
diff --git a/src/abs.ml b/src/abs.ml index c838cc5..40951d3 100644 --- a/src/abs.ml +++ b/src/abs.ml @@ -49,6 +49,8 @@ module rec AFile : sig val fprint : unit IO.output -> t -> unit + val global_variables_and_regions : t -> (Cil.varinfo, Regions.t) BatMap.t + end = struct module VarMap = Hashtbl.Make(Utils.Varinfo) @@ -172,6 +174,14 @@ end = struct let eprint = fprint stderr + let global_variables_and_regions t = + VarMap.fold (fun varinfo entry acc -> + match entry with + | Var(sch, _) -> + Map.add varinfo Regions.(Scheme.regions_in sch) acc + | Fun _ -> acc + ) t Map.empty + end and AFun : sig diff --git a/src/cfgPrinter.ml b/src/cfgPrinter.ml new file mode 100644 index 0000000..4835a9b --- /dev/null +++ b/src/cfgPrinter.ml @@ -0,0 +1,382 @@ +open Batteries + +open Type +open Abs +open Option.Infix + + +module type PrinterSpec = sig + + type state + val transition: state -> Effects.e list -> state + val is_in_interesting_section: state -> bool + val initial_state: state + val is_in_transition_labels: Effects.e -> bool + val is_in_final_state: state -> bool + val string_of_state: state -> string + +end + + +module type Printer = sig + + val print : AFile.t -> Cil.fundec -> int -> unit + +end + +module MakeT (Monitor: PrinterSpec) = struct + + type step = PathTree.step + + (** step.lenv seems not referentially transparent. This equality test on + steps seems to work (ignoring step.lenv) *) + module StepMap = Map.Make (struct + + type t = PathTree.step + + let compare_kind (k1: PathTree.step_kind) (k2: PathTree.step_kind): int = + match k1, k2 with + | Stmt il1, Stmt il2 -> + Pervasives.compare il1 il2 + (* a gamble: instr can embed expressions, and elsewhere there is a + custom comparator of expressions. If out of memory errors reappear + in map, then consider refining this to use CilExtra.compareExp. 
*) + | Test (tk1, e1), Test (tk2, e2) when tk1 = tk2 -> + CilExtra.compareExp e1 e2 + | Test (tk1, _), Test (tk2, _) -> + Pervasives.compare tk1 tk2 + | Goto (la1,lo1), Goto (la2, lo2) when la1 = la2 -> + Cil.compareLoc lo1 lo2 + | Goto (la1,_), Goto (la2, _) -> + Pervasives.compare la1 la2 + | Ret eo1, Ret eo2 -> Option.compare ~cmp:CilExtra.compareExp eo1 eo2 + | Stmt _, _ -> -1 + | _, Stmt _ -> +1 + | Test _, _ -> -1 + | _, Test _ -> +1 + | Goto _, _ -> -1 + | _________ -> +1 + + let compare (s1: step) (s2: step): int = + match compare_kind s1.kind s2.kind, Effects.compare s1.effs s2.effs with + | 0, 0 -> Cil.compareLoc s1.sloc s2.sloc + | 0, result -> result + | result, _ -> result + + end) + +module RegionMap = Map.Make (Region) + + type 'a set = 'a Set.t + type 'a region_map = 'a RegionMap.t + type color = Monitor.state + type typ = Cil.typ + type config = color set region_map + type progress = { + colors : config StepMap.t ; (* states of pertinent monitors after each step *) + current: config ; (* states passed from the previous step *) + path : unit -> PathTree.t (* the path that still needs to be explored *) + } + + + let assert_bool = OUnit2.assert_bool;; + + + let is_call = function + | Cil.(Call _) -> true + | ____________ -> false ;; + + + (** Get region from a memory effect, ignore others *) + let get_region (e: Effects.e): (region * Effects.e) option = + match e with + | Effects.Mem (_, region) -> Some (region, e) + | _______________________ -> None ;; + + + let group_by_region (reffs: (region * 'b) list): (region * 'b list) list = + reffs + |> List.group (fun r r' -> Region.compare (fst r) (fst r')) + |> List.map List.split + |> List.map (Tuple2.map1 List.hd) ;; + + + + let pp_colors (colors: color set): PP.doc = + PP.(colors + |> Set.to_list + |> List.sort Pervasives.compare + |> List.map @@ double_quotes % words % Monitor.string_of_state + |> comma_sep + |> brackets + ) ;; + + + (** Format the effects of the step/line *) + let pp_effects_regions (effects: Effects.EffectSet.t): PP.doc = + PP.(effects + |> Effects.EffectSet.to_list + |> List.unique + |> List.map Effects.pp_e + |> List.map double_quotes + |> comma_sep + |> brackets ) ;; + + + let effect_name = function + | Effects.Mem (k, _)-> Effects.string_of_kind k + | Effects.Noret -> "noret" + | Effects.IrqsOn -> "irqson" + | Effects.IrqsOff -> "irqsoff" + | Effects.BhsOn -> "bhson" + | Effects.BhsOff -> "bhsoff" + | Effects.Sleep -> "sleep" + | _________________ -> "" ;; + + + (** Format the effects of the step/line *) + let pp_effects (effects: Effects.EffectSet.t): PP.doc = + PP.(effects + |> Effects.EffectSet.to_list + |> List.map effect_name + |> List.filter (fun d -> d != "") + |> List.unique + |> List.map @@ double_quotes % words + |> comma_sep + |> brackets ) ;; + + + (** Format regions accessed in the step/line *) + let pp_regions (regions: region list): PP.doc = + PP.(regions + |> List.map Region.pp + |> List.map PP.double_quotes + |> comma_sep + |> brackets) ;; + + let pp_region_doc (r: region) (s: PP.doc): PP.doc = + PP.(newline + !^ "-" ++ double_quotes (Region.pp r) ++ colon ++ s) ;; + + (** Format a map of region to string properties (pre-printed) *) + let pp_region_doc_map (field: string) (smap: PP.doc region_map): PP.doc = + PP.( + if RegionMap.is_empty smap then empty + else newline + !^ field + colon + ( + RegionMap.bindings smap + |> List.map @@ uncurry pp_region_doc + |> concat + )) + ;; + + + let type_to_string t: string = + t |> Cil.d_type () |> Pretty.sprint ~width:800 ;; + + + + (** Print a 
single output line *) + let pp_step (rt: Cil.varinfo region_map) (step: step) (colors: config): PP.doc = + let regions = step.effs.may + |> Effects.EffectSet.to_list + |> List.filter_map get_region + |> List.map fst in + let varinfos = List.filter_map (flip RegionMap.find_opt rt) regions in + let type_names = varinfos + |> List.map (fun (v: Cil.varinfo) -> v.vtype) + |> List.map type_to_string + |> List.unique + |> List.map @@ PP.double_quotes % PP.words + |> PP.comma_sep |> PP.brackets in + let var_names = varinfos + |> List.map (fun (v: Cil.varinfo) -> v.vname) + |> List.unique + |> List.map @@ PP.double_quotes % PP.words + |> PP.comma_sep |> PP.brackets + in PP.( + newline + words "- line:" ++ int step.sloc.line + newline + + indent ( + words "source: |-" + newline + + indent (PathTree.pp_step step) + + pp_region_doc_map "lock_colors" (RegionMap.mapi (fun _ -> pp_colors) colors) + + newline + words "effects:" ++ pp_effects_regions step.effs.may + + newline + words "effects_names:" ++ pp_effects step.effs.may + + newline + words "effects_vars:" ++ var_names + + newline + words "effects_regions:" ++ pp_regions regions + + newline + words "types:" ++ type_names + ) + ) ;; + + + let cmp_step_config (sc1: step * config) (sc2: step * config): int = + let s1, s2 = fst sc1, fst sc2 in + Pervasives.compare s1.sloc.line s2.sloc.line ;; + + + (** Format the file info and the prefix *) + let pp_prefix (file: string) (func: string) + (rm: Cil.varinfo region_map) (regions: region list): PP.doc = + let region_names = regions + |> List.map (fun r -> + r, PP.double_quotes @@ PP.words @@ (RegionMap.find r rm).vname) + |> List.enum + |> RegionMap.of_enum in + let region_types = regions + |> List.map (fun r -> + r, PP.double_quotes @@ PP.words @@ type_to_string (RegionMap.find r rm).vtype) + |> List.enum + |> RegionMap.of_enum + in PP.( + words "- func:" ++ !^ func + newline + + indent ( + !^ "file:" ++ double_quotes (!^ file) + + pp_region_doc_map "lock_names" region_names + + pp_region_doc_map "lock_types" region_types + + newline + !^ "lines:" + )) ;; + + + (** Enumerate all regions in a step map, so presumably all monitored regions + seen in a function *) + let regions_of_stepmap (m: config StepMap.t): region list = m + |> StepMap.values + |> Enum.map RegionMap.keys + |> Enum.map Regions.of_enum + |> Enum.reduce Regions.union + |> Regions.to_list ;; + + + (** Print all lines in the provided map *) + let pp_steps (file: string) (func: string) (rm: Cil.varinfo region_map) (colors: config StepMap.t): PP.doc = + let regions = regions_of_stepmap colors + in PP.(colors + |> StepMap.bindings + |> List.stable_sort cmp_step_config + |> List.map @@ uncurry @@ pp_step rm + |> concat + |> indent + |> append @@ pp_prefix file func rm regions + |> flip append @@ newline + ) ;; + + + + (** Collect all regions mentioned in [coloring] of all steps, and complete all + entries for each step with initial states, so that all steps return maps + (colorings, configs) with the same domain. *) + let make_total (coloring: config StepMap.t): config StepMap.t = + let initials = coloring + |> regions_of_stepmap + |> List.map (fun r -> r, Set.singleton Monitor.initial_state) + |> List.enum + |> RegionMap.of_enum + in StepMap.map (RegionMap.union (fun _ _ r -> Some r) initials) coloring ;; + + + (** Merge the old and new set of colors for a region by unioning them. Do this + for any region that is mentioned in either new or the old map. 
*) + let conf_union (proposal: config) (seen: config): config = + RegionMap.union (fun _ s1 s2 -> Some (Set.union s1 s2)) proposal seen ;; + + + let add_colors (step: step) (to_add: config) (old: config StepMap.t): config StepMap.t = + StepMap.modify_def RegionMap.empty step (conf_union to_add) old + + + (** Execute all monitor automata in the configuration [current] by letting + them see all the effects in the map [effects]. Both structures are + indexed by regions, and the operation is point-wise. Produces a successor + configuration for a step (which is the coloring used in the next step). *) + let fire_transitions (current: config) (effects: Effects.e list region_map) + : config = + let f _ (s: color set option) (e: Effects.e list option): color set option = + match s, e with + | Some states, Some effects -> + Some (Set.map (flip Monitor.transition effects) states) + | Some states, None -> + Some (Set.map (flip Monitor.transition []) states) + | None, Some effects -> + Some (Set.singleton @@ Monitor.transition Monitor.initial_state effects) + | _ -> None + in RegionMap.merge f current effects ;; + + + (** Explore a step of execution, a CFG edge, without inlining. This function + updates all the dictionary tracking data for printing. It only applies the + step to monitors that have not been applied to this step. Otherwise the + progress state is not changed. *) + let step_over (progress: progress) (step: step): progress = + let successors = step.effs.may + |> Effects.EffectSet.filter Monitor.is_in_transition_labels + |> Effects.EffectSet.to_list + |> List.filter_map get_region + |> group_by_region + |> Seq.of_list + |> RegionMap.of_seq + |> fire_transitions progress.current in + let colors1 = add_colors step successors progress.colors + in { + current = successors ; + colors = colors1; + path = progress.path + } ;; + + + (** Explore all reachable paths from the current state of exploration, + captured by [progress]. *) + let rec explore_paths (func: AFun.t) (inline_limit: int) (progress: progress) + : progress = + match progress.path () with + | Seq (step, remaining) -> + let progress1 = Some step + |> Option.filter (fun _ -> inline_limit > 0) + |> Option.filter (PathTree.exists_in_stmt is_call) + >>= PathTree.inline func + |> Option.map (fun fp -> explore_paths func (inline_limit - 1) + { progress with path = snd fp }) + |? step_over progress step + in explore_paths func inline_limit { progress1 with path = remaining } + + | Assume (_, _, remaining) -> + explore_paths func inline_limit { progress with path = remaining } + + | If (true_path, false_path) -> + let progress1 = + explore_paths func inline_limit { progress with path = true_path } + in explore_paths func inline_limit { + progress with colors = progress1.colors; path = false_path } + + | Nil -> progress + ;; + + + (** This is the main function of the module. It explores the paths in the + [file] and prints the coloring for the lines traversed (according to a + lock monitor). 
*) + let print (file: AFile.t) (decl_f: Cil.fundec) (inline_limit: int): unit = + let _ = Printexc.record_backtrace true in + let func = AFile.find_fun file decl_f.svar + |> Option.get + |> snd in + let global = AFile.global_variables_and_regions file + |> Map.to_seq in + let local = decl_f.sformals @ decl_f.slocals + |> Seq.of_list + |> Seq.map (fun e -> e, AFun.regions_of func e) in + let rtmap = Seq.append local global + |> Seq.map (fun vrr -> fst vrr, snd vrr |> Regions.to_seq) + |> Seq.flat_map (uncurry @@ fun (v: Cil.varinfo) -> Seq.map (Tuple2.make v)) + |> Seq.map Tuple2.swap + |> RegionMap.of_seq in + let initial = { + colors = StepMap.empty ; + current = RegionMap.empty; + path = PathTree.paths_of func } in + let outcome = + Printexc.pass (fun _ -> explore_paths func inline_limit initial) () in + let coloring = make_total outcome.colors in + let printout = pp_steps decl_f.svar.vdecl.file decl_f.svar.vname rtmap coloring + in SmartPrint.to_stdout 10000 2 printout ;; + +end + +module Make (P: PrinterSpec): Printer = MakeT (P) diff --git a/src/cfgPrinterDoubleLock.ml b/src/cfgPrinterDoubleLock.ml new file mode 100644 index 0000000..f1e26ca --- /dev/null +++ b/src/cfgPrinterDoubleLock.ml @@ -0,0 +1,76 @@ +open Batteries +open Type +open Effects + +(** A monitor that attempts to mark critical sections on a control flow graph. + It is assumed that it is run on correct code, so it can do this + unambigously. Since this is a big assumption (a CFG is too coarse an + abstraction), it tries to recover heuristically after experiencing + conflicts (lock-lock, unlock-unlock) or nondeterminism. + + The state space is a powerset of the states of a regular monitor. *) +module SpecT = struct + + type state = + | Red (* The monitor sees a lock statement/effect *) + | Green (* The monitor sees an unlock statement/effect *) + | RedGreen (* The monitor sees both lock and unlock effects *) + | Orange (* The monitor is in a critical section *) + | Black (* The monitor is outside critical sections *) + | BlackOrange (* Either inside or outside; either possible *) + + let initial_state = Black + let transition_labels = [Lock; Unlock] + + let locks effects: bool = + List.exists (function Mem (Lock, _) -> true | _ -> false) effects ;; + + let unlocks effects: bool = + List.exists (function Mem (Unlock, _) -> true | _ -> false) effects ;; + + let is_in_transition_labels (effect: Effects.e): bool = + match effect with + | Mem(kind, _) -> List.mem kind transition_labels + | ____________ -> false + + (* This transition relation is 'resilient'. It tries not to crash, + but extract info for learning as much as possible. When confused + restarts, it is meant to be run on error free files. + Confused means that we found an error, which we should not if cfg + printer is only used in learning on correct files. We restart + when this happens. 
*) + let transition current (effect: Effects.e list): state = + match current, locks effect, unlocks effect with + + | ________, true, false -> Red + | ________, false, true -> Green + | ________, true, true -> RedGreen + | Red, false, false -> Orange + | Green, false, false -> Black + | RedGreen, false, false -> BlackOrange + | pred, false, false -> pred + + ;; + + + (* ignored in cfg printer *) + let is_in_interesting_section _ : bool = true + + (* ignored in cfg printer *) + let is_in_final_state _ : bool = false + + let string_of_state state = + match state with + | Orange -> "orange" + | Black -> "black" + | BlackOrange -> "black|orange" + | Red -> "red" + | Green -> "green" + | RedGreen -> "red|green" + +end + +module Spec: CfgPrinter.PrinterSpec = SpecT + +module Printer = CfgPrinter.Make(Spec) +include Printer diff --git a/src/cfgPrinter_test.ml b/src/cfgPrinter_test.ml new file mode 100644 index 0000000..c4fc438 --- /dev/null +++ b/src/cfgPrinter_test.ml @@ -0,0 +1,40 @@ +open Batteries +open OUnit2 +open Libeba +open Type + +module M = CfgPrinterDoubleLock.SpecT +module P = CfgPrinter.MakeT (M) +module O = OUnitAssert + +module RegionMap = P.RegionMap + + +let cmp_colors (a: M.state P.set RegionMap.t) + (b: M.state P.set RegionMap.t): bool = + RegionMap.equal Set.equal a b;; + +let cunion _ (s1: 'a Set.t) (s2: 'b Set.t) = Some (Set.union s1 s2) ;; + +let test_conf_union _ = + let r1 = Region.meta () in + let r2 = Region.meta () in + let n = Set.of_list [M.Orange; M.Black] in + let o = Set.of_list [M.Black] in + let nm1, om1 = RegionMap.singleton r1 n, RegionMap.singleton r1 o in + let nm2, om2 = RegionMap.singleton r1 n, RegionMap.singleton r2 o in + let nm3, om3 = RegionMap.singleton r2 n, RegionMap.singleton r1 o in + let nm4, om4 = RegionMap.union cunion nm2 om2, RegionMap.union cunion nm3 om3 in + O.assert_equal (P.conf_union nm1 om1) nm1; + O.assert_equal (P.conf_union nm2 om2 |> RegionMap.find r1) (RegionMap.union cunion nm2 om2 |> RegionMap.find r1); + O.assert_equal (P.conf_union nm2 om2 |> RegionMap.find r2) (RegionMap.union cunion nm2 om2 |> RegionMap.find r2); + O.assert_equal (P.conf_union nm2 om2 |> RegionMap.cardinal) (RegionMap.union cunion nm2 om2 |> RegionMap.cardinal); + O.assert_equal ~cmp:cmp_colors (P.conf_union nm2 om2) (RegionMap.union cunion nm2 om2); + O.assert_equal ~cmp:cmp_colors (P.conf_union nm4 om4) ([r1, n; r2, n] |> List.enum |> RegionMap.of_enum) ;; + + +let cfgPrinter_tests = "cfgPrinter" >::: [ + "conf_union" >:: test_conf_union; + ] + +let _ = run_test_tt_main cfgPrinter_tests diff --git a/src/dune b/src/dune index 4feaa15..8e6da28 100644 --- a/src/dune +++ b/src/dune @@ -1,8 +1,63 @@ +(library + (name libeba) + (modules abs + automataChecker + axioms + cfgPrinter + cfgPrinterDoubleLock + checkAutomataDoubleUnlock + checkBhOnIrqFlow2 + checkDFreeFlow2 + checkDLockFlow2 + checkDUnlockFlow2 + checkDUnlockFlow2Inverse + checkUAF + checkUninitFlow1 + cilExtra + env + error + flow1Checker + flow2Checker + gcc + infer + lenv + opts + pP + pathTree + structs + type + utils + uniq) + (libraries + batteries + cil + ocamlgraph + smart_print + cmdliner + dolog + fpath + ounit2 + )) + (executable - (name eba) - (package eba) - (public_name eba) - (libraries batteries cil ocamlgraph smart_print cmdliner dolog fpath)) + (name eba) + (package eba) + (public_name eba) + (modules eba) + (libraries libeba)) + +(test + (name cfgPrinter_test) + (modules cfgPrinter_test) + (libraries ounit2 libeba)) + +(test + (name type_test) + (modules type_test) + (libraries 
ounit2 libeba)) + + + (env (_ (flags (:standard -warn-error -A)))) diff --git a/src/eba.ml b/src/eba.ml index 7867efe..4964044 100644 --- a/src/eba.ml +++ b/src/eba.ml @@ -3,6 +3,7 @@ open Batteries open Cmdliner open Dolog +open Libeba open Abs module L = LazyList @@ -49,23 +50,11 @@ let run_checks checks file fileAbs :unit = then run_check_fun fd CheckUAF.in_func; if checks.chk_dlock then run_check_fun fd CheckDLockFlow2.in_func; - if checks.chk_dfree - then run_check_fun fd CheckDFreeFlow2.in_func; - if checks.chk_dunlock - then run_check_fun fd CheckDUnlockFlow2.in_func; - if checks.chk_dunlock_inv - then run_check_fun fd CheckDUnlockFlow2Inverse.in_func; if checks.chk_birq then run_check_fun fd CheckBhOnIrqFlow2.in_func; ); - if checks.chk_automata_double_unlock - then - List.map (fun fd -> CheckAutomataDoubleUnlock.check fileAbs fd) fds - |> List.flatten - |> CheckAutomataDoubleUnlock.filter_results - |> CheckAutomataDoubleUnlock.stringify_results - |> L.of_list - |> print_bugs + if Opts.Get.print_cfg() then + List.iter (fun fd -> CfgPrinterDoubleLock.print fileAbs fd (Opts.Get.inline_limit())) fds let infer_file checks fn = let file = Frontc.parse fn () in @@ -96,7 +85,7 @@ let log_level_of_int = function | _ -> Log.DEBUG (* x >= 3 *) let infer_files verbosity - flag_gcstats flag_saveabs flag_warn_output flag_fake_gcc + flag_gcstats flag_saveabs flag_warn_output flag_fake_gcc flag_no_static flag_print_cfg flag_no_dce flag_no_dfe flag_safe_casts flag_externs_do_nothing opt_inline_limit opt_loop_limit opt_branch_limit flag_no_path_check flag_all_lock_types flag_no_match_lock_exp flag_ignore_writes @@ -108,6 +97,8 @@ let infer_files verbosity Log.set_log_level (log_level_of_int verbosity); Opts.Set.gc_stats flag_gcstats; Opts.Set.save_abs flag_saveabs; + Opts.Set.no_static flag_no_static; + Opts.Set.print_cfg flag_print_cfg; Opts.Set.warn_output flag_warn_output; Opts.Set.dce (not flag_no_dce); Opts.Set.dfe (not flag_no_dfe); @@ -154,6 +145,14 @@ let flag_fake_gcc = let doc = "Fake GCC and preprocess input file." in Arg.(value & flag & info ["fake-gcc"] ~doc) +let flag_no_static = + let doc = "Explore non-static functions only." in + Arg.(value & flag & info ["no-static"] ~doc) + +let flag_print_cfg = + let doc = "Print the CFG with effects." 
in + Arg.(value & flag & info ["print-double-lock-cfg"] ~doc) + (* Type inferrer*) let flag_no_dce = @@ -254,7 +253,7 @@ let cmd = ] in Term.(pure infer_files $ verbose - $ flag_gcstats $ flag_saveabs $ flag_warn_output $ flag_fake_gcc + $ flag_gcstats $ flag_saveabs $ flag_warn_output $ flag_fake_gcc $ flag_no_static $ flag_print_cfg $ flag_no_dce $ flag_no_dfe $ flag_safe_casts $ flag_externs_do_nothing $ opt_inline_limit $ opt_loop_limit $ opt_branch_limit $ flag_no_path_check $ flag_all_lock_types $ flag_no_match_lock_exp $ flag_ignore_writes diff --git a/src/opts.ml b/src/opts.ml index c8178d4..f997406 100644 --- a/src/opts.ml +++ b/src/opts.ml @@ -4,6 +4,8 @@ type copts = { mutable gc_stats : bool; mutable save_abs : bool; mutable warn_output : bool; + mutable no_static : bool; + mutable print_cfg : bool; (* Type inferrer *) mutable dce : bool; mutable dfe : bool; @@ -28,6 +30,8 @@ let opts : copts = { gc_stats = false; save_abs = false; warn_output = false; + print_cfg = false; + no_static = false; dce = true; dfe = true; @@ -55,6 +59,10 @@ struct let save_abs v = opts.save_abs <- v + let no_static v = opts.no_static <- v + + let print_cfg v = opts.print_cfg <- v + let warn_output v = opts.warn_output <- v let dce v = opts.dce <- v @@ -90,6 +98,10 @@ struct let save_abs () = opts.save_abs + let no_static () = opts.no_static + + let print_cfg () = opts.print_cfg + let warn_output () = opts.warn_output let dce () = opts.dce diff --git a/src/opts.mli b/src/opts.mli index 425708a..0f83c32 100644 --- a/src/opts.mli +++ b/src/opts.mli @@ -5,6 +5,10 @@ module Set : sig val save_abs : bool -> unit + val no_static : bool -> unit + + val print_cfg : bool -> unit + val warn_output : bool -> unit val dce : bool -> unit @@ -39,6 +43,10 @@ module Get : sig val save_abs : unit -> bool + val no_static : unit -> bool + + val print_cfg : unit -> bool + val warn_output : unit -> bool val dfe : unit -> bool diff --git a/src/pP.ml b/src/pP.ml index fc67b16..f3544fc 100644 --- a/src/pP.ml +++ b/src/pP.ml @@ -31,14 +31,20 @@ let int = SP.OCaml.int let prefix str doc = !^ str + doc +let append = SP.append + let parens = SP.parens let braces = SP.braces let brackets = SP.brakets +let double_quotes = SP.double_quotes + let angle_brackets = SP.angle_brakets +let concat = SP.concat + let separate = SP.separate let newline_sep = separate newline @@ -53,4 +59,8 @@ let indent = SP.indent let words = SP.words +let nest = SP.nest + let to_string = SP.to_string 70 4 + +let to_stdout = SP.to_stdout 70 4 diff --git a/src/pathTree.ml b/src/pathTree.ml index 114d54b..ad1fd5f 100644 --- a/src/pathTree.ml +++ b/src/pathTree.ml @@ -101,11 +101,17 @@ type cond = Cond of test_kind * Cil.exp * Cil.location let tk_of_option = Option.map_default (fun b -> TWhile b) TOther -let find_in_stmt finder step = +let find_in_stmt (finder: Cil.instr list -> 'a option) (step: step): 'a option = match step.kind with | Stmt(is) -> finder is | _else_____ -> None +(** Holds iff [step] is a statement and contains an instruction satisfying [p]*) +let exists_in_stmt (p: Cil.instr -> bool) (step: step): bool = + match step.kind with + | Stmt instrs -> List.exists p instrs + | ___________ -> false + let pp_step step = match step.kind with | Stmt is -> @@ -422,7 +428,7 @@ let rec reachable ks t ~guard ~target ~trace st :('st * step * path * t delayed) let open L in match t() with | Assume(cond,b,t') -> - let res = map (push_dec cond b) in + let res = map (push_dec cond b) in let explored = (reachable ks t' guard target trace st) in explored |> res 
| Seq(step,t') -> @@ -510,4 +516,4 @@ let inline_check ~bound ~filter ~guard ~target ~trace ~caller st step = ) [] stack in inline_check_loop ~bound ~filter ~guard ~target ~trace caller st step - |> Option.map (Tuple2.map2 path_of_stack) \ No newline at end of file + |> Option.map (Tuple2.map2 path_of_stack) diff --git a/src/pathTree.mli b/src/pathTree.mli index aa3e30c..0c5589b 100644 --- a/src/pathTree.mli +++ b/src/pathTree.mli @@ -25,6 +25,10 @@ type step = { val find_in_stmt : (Cil.instr list -> 'a option) -> step -> 'a option +(** Holds iff the provided step is a statement and contains an + instruction satisfying the provided predicate. *) +val exists_in_stmt : (Cil.instr -> bool) -> step -> bool + val string_of_step : step -> string val pp_step : step -> PP.doc @@ -113,4 +117,4 @@ val inline_check : 'st -> step -> (step * path) option -val inline : AFun.t -> step -> (AFun.t * t delayed) option \ No newline at end of file +val inline : AFun.t -> step -> (AFun.t * t delayed) option diff --git a/src/type.ml b/src/type.ml index 0c6bed5..5f970f9 100644 --- a/src/type.ml +++ b/src/type.ml @@ -491,7 +491,7 @@ module rec Shape : sig (* THINK how to handle extern struct declarations when we * add support for inter-file analysis. Usually there are also * a number of function prototypes referring to these structs. - * But that's OK, because such structs are never actually used. + * But that's OK, because such structs are never actually used. * * For now, just return a dummy struct. *) @@ -716,6 +716,8 @@ and Region : sig val is_meta : t -> bool + val is_bound : t -> bool + (** * if_meta f ?r x = f ?r x * if_meta f 'r x = x @@ -766,6 +768,8 @@ end = struct | Meta _ -> true | Bound _ -> false + let is_bound = not % is_meta + let if_meta f r x = Utils.apply_if (is_meta r) (f r) x let if_bound f r x = Utils.apply_if (not (is_meta r)) (f r) x @@ -1233,6 +1237,10 @@ and Effects : sig val zonk : t -> t + val string_of_kind: mem_kind -> string + + val pp_kind: mem_kind -> PP.doc + val pp_e : e -> PP.doc val pp : t -> PP.doc @@ -1243,9 +1251,9 @@ and Effects : sig = struct type mem_kind = Alloc | Free | Read | Write | Uninit | Call | Lock | Unlock - + let mem_kind_pp m = match m with - | Alloc -> "Alloc" + | Alloc -> "Alloc" | Free -> "Free" | Read -> "Read" | Write -> "Write" @@ -1988,10 +1996,10 @@ and Subst : sig } let of_enum_pair xs ys = - let zs = Enum.combine(VarEnum.shapes xs,VarEnum.shapes ys) in - let rs = Enum.combine(VarEnum.regions xs,VarEnum.regions ys) in - let fs = Enum.combine(VarEnum.effects xs,VarEnum.effects ys) in - make zs rs fs + let zs = Enum.combine (VarEnum.shapes xs) (VarEnum.shapes ys) in + let rs = Enum.combine (VarEnum.regions xs) (VarEnum.regions ys) in + let fs = Enum.combine (VarEnum.effects xs) (VarEnum.effects ys) in + make zs rs fs let find_shape a {shapes} = ShapeMap.Exceptionless.find a shapes diff --git a/src/type_test.ml b/src/type_test.ml new file mode 100644 index 0000000..f24edbd --- /dev/null +++ b/src/type_test.ml @@ -0,0 +1,30 @@ +open OUnit2 +open Libeba + +module O = OUnitAssert +module P = CfgPrinter.MakeT (CfgPrinterDoubleLock.Spec) +module Region = Libeba.Type.Region + +let bound_region_unification _ = + + let r1, r2 = Region.meta (), Region.meta () in + let sr1, sr2 = Region.to_string r1, Region.to_string r2 in + + let pre_msg = Printf.sprintf "Fresh regions before unification differ! 
(%s, %s)" sr1 sr2 in + let _ = assert_bool pre_msg (Region.equal r1 r2 |> not) in + + let _ = Region.(r1 =~ r2) in + let ur1, ur2 = Region.to_string r1, Region.to_string r2 in + let pre_msg = Printf.sprintf "Regions after unification still differ! (%s, %s)" ur1 ur2 in + let _ = assert_bool pre_msg (Region.equal r1 r2 |> not) in + + let zr1, zr2 = Region.to_string (Region.zonk r1), Region.to_string (Region.zonk r2) in + let post_msg = Printf.sprintf "Region zonks after unification equal! (%s, %s)" zr1 zr2 + in assert_bool post_msg (Region.equal (Region.zonk r1) (Region.zonk r2)) ;; + + +let tests = "type" >::: [ + "unification equates regions, but not uniq ids" >:: bound_region_unification; +] + +let run = run_test_tt_main tests diff --git a/src/utils.ml b/src/utils.ml index b7776da..0e60fb4 100644 --- a/src/utils.ml +++ b/src/utils.ml @@ -1,5 +1,6 @@ open Batteries +open Dolog let compare_on f x y = Pervasives.compare (f x) (f y) @@ -20,11 +21,17 @@ let match_pair = function | [a;b] -> (a,b) | ____ -> Error.panic_with "match_pair: not a 2-element list" -let colored cd str = Printf.sprintf "\027[%sm%s\027[0m" cd str +(*let colored cd str = Printf.sprintf "\027[%sm%s\027[0m" cd str*) +let colored cd str = Printf.sprintf "%s%s" cd str -let green = colored "0;32" + +(*let green = colored "0;32" let purple = colored "0;35" -let cyan = colored "0;36" +let cyan = colored "0;36"*) + +let green = colored "" +let purple = colored "" +let cyan = colored "" let check_if_file_exists fname :unit = if Sys.file_exists fname diff --git a/test/aa340845ae6-reduced.c b/test/aa340845ae6-reduced.c new file mode 100644 index 0000000..6ff69e0 --- /dev/null +++ b/test/aa340845ae6-reduced.c @@ -0,0 +1,1133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Shared application/kernel submission and completion ring pairs, for + * supporting fast/efficient IO. + * + * A note on the read/write ordering memory barriers that are matched between + * the application and kernel side. + * + * After the application reads the CQ ring tail, it must use an + * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses + * before writing the tail (using smp_load_acquire to read the tail will + * do). It also needs a smp_mb() before updating CQ head (ordering the + * entry load(s) with the head store), pairing with an implicit barrier + * through a control-dependency in io_get_cqring (smp_store_release to + * store head will do). Failure to do so could lead to reading invalid + * CQ entries. + * + * Likewise, the application must use an appropriate smp_wmb() before + * writing the SQ tail (ordering SQ entry stores with the tail store), + * which pairs with smp_load_acquire in io_get_sqring (smp_store_release + * to store the tail will do). And it needs a barrier ordering the SQ + * head load before writing new SQ entries (smp_load_acquire to read + * head will do). + * + * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application + * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after* + * updating the SQ tail; a full memory barrier smp_mb() is needed + * between. + * + * Also see the examples in the liburing library: + * + * git://git.kernel.dk/liburing + * + * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens + * from data shared between the kernel and application. This is done both + * for ordering purposes, but also to ensure that once a value is loaded from + * data that the application could potentially modify, it remains stable. 
+ * + * Copyright (C) 2018-2019 Jens Axboe + * Copyright (c) 2018-2019 Christoph Hellwig + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#include + +#include "internal.h" +#include "io-wq.h" + +#define IORING_MAX_ENTRIES 32768 +#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES) + +/* + * Shift of 9 is 512 entries, or exactly one page on 64-bit archs + */ +#define IORING_FILE_TABLE_SHIFT 9 +#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT) +#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1) +#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE) + +struct io_uring { + u32 head ____cacheline_aligned_in_smp; + u32 tail ____cacheline_aligned_in_smp; +}; + +/* + * This data is shared with the application through the mmap at offsets + * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING. + * + * The offsets to the member fields are published through struct + * io_sqring_offsets when calling io_uring_setup. + */ +struct io_rings { + /* + * Head and tail offsets into the ring; the offsets need to be + * masked to get valid indices. + * + * The kernel controls head of the sq ring and the tail of the cq ring, + * and the application controls tail of the sq ring and the head of the + * cq ring. + */ + struct io_uring sq, cq; + /* + * Bitmasks to apply to head and tail offsets (constant, equals + * ring_entries - 1) + */ + u32 sq_ring_mask, cq_ring_mask; + /* Ring sizes (constant, power of 2) */ + u32 sq_ring_entries, cq_ring_entries; + /* + * Number of invalid entries dropped by the kernel due to + * invalid index stored in array + * + * Written by the kernel, shouldn't be modified by the + * application (i.e. get number of "new events" by comparing to + * cached value). + * + * After a new SQ head value was read by the application this + * counter includes all submissions that were dropped reaching + * the new SQ head (and possibly more). + */ + u32 sq_dropped; + /* + * Runtime SQ flags + * + * Written by the kernel, shouldn't be modified by the + * application. + * + * The application needs a full memory barrier before checking + * for IORING_SQ_NEED_WAKEUP after updating the sq tail. + */ + u32 sq_flags; + /* + * Runtime CQ flags + * + * Written by the application, shouldn't be modified by the + * kernel. + */ + u32 cq_flags; + /* + * Number of completion events lost because the queue was full; + * this should be avoided by the application by making sure + * there are not more requests pending than there is space in + * the completion queue. + * + * Written by the kernel, shouldn't be modified by the + * application (i.e. get number of "new events" by comparing to + * cached value). + * + * As completion events come in out of order this counter is not + * ordered with any other data. + */ + u32 cq_overflow; + /* + * Ring buffer of completion events. + * + * The kernel writes completion events fresh every time they are + * produced, so the application is allowed to modify pending + * entries. 
+ */ + struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; +}; + +struct io_mapped_ubuf { + u64 ubuf; + size_t len; + struct bio_vec *bvec; + unsigned int nr_bvecs; +}; + +struct fixed_file_table { + struct file **files; +}; + +struct fixed_file_ref_node { + struct percpu_ref refs; + struct list_head node; + struct list_head file_list; + struct fixed_file_data *file_data; + struct llist_node llist; +}; + +struct fixed_file_data { + struct fixed_file_table *table; + struct io_ring_ctx *ctx; + + struct percpu_ref *cur_refs; + struct percpu_ref refs; + struct completion done; + struct list_head ref_list; + spinlock_t lock; +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __s32 len; + __u16 bid; +}; + +struct io_ring_ctx { + struct { + struct percpu_ref refs; + } ____cacheline_aligned_in_smp; + + struct { + unsigned int flags; + unsigned int compat: 1; + unsigned int limit_mem: 1; + unsigned int cq_overflow_flushed: 1; + unsigned int drain_next: 1; + unsigned int eventfd_async: 1; + + /* + * Ring buffer of indices into array of io_uring_sqe, which is + * mmapped by the application using the IORING_OFF_SQES offset. + * + * This indirection could e.g. be used to assign fixed + * io_uring_sqe entries to operations and only submit them to + * the queue when needed. + * + * The kernel modifies neither the indices array nor the entries + * array. + */ + u32 *sq_array; + unsigned cached_sq_head; + unsigned sq_entries; + unsigned sq_mask; + unsigned sq_thread_idle; + unsigned cached_sq_dropped; + atomic_t cached_cq_overflow; + unsigned long sq_check_overflow; + + struct list_head defer_list; + struct list_head timeout_list; + struct list_head cq_overflow_list; + + wait_queue_head_t inflight_wait; + struct io_uring_sqe *sq_sqes; + } ____cacheline_aligned_in_smp; + + struct io_rings *rings; + + /* IO offload */ + struct io_wq *io_wq; + struct task_struct *sqo_thread; /* if using sq thread polling */ + struct mm_struct *sqo_mm; + wait_queue_head_t sqo_wait; + + /* + * If used, fixed file set. Writers must ensure that ->refs is dead, + * readers must ensure that ->refs is alive as long as the file* is + * used. Only updated through io_uring_register(2). + */ + struct fixed_file_data *file_data; + unsigned nr_user_files; + int ring_fd; + struct file *ring_file; + + /* if used, fixed mapped user buffers */ + unsigned nr_user_bufs; + struct io_mapped_ubuf *user_bufs; + + struct user_struct *user; + + const struct cred *creds; + + struct completion ref_comp; + struct completion sq_thread_comp; + + /* if all else fails... */ + struct io_kiocb *fallback_req; + +#if defined(CONFIG_UNIX) + struct socket *ring_sock; +#endif + + struct idr io_buffer_idr; + + struct idr personality_idr; + + struct { + unsigned cached_cq_tail; + unsigned cq_entries; + unsigned cq_mask; + atomic_t cq_timeouts; + unsigned long cq_check_overflow; + struct wait_queue_head cq_wait; + struct fasync_struct *cq_fasync; + struct eventfd_ctx *cq_ev_fd; + } ____cacheline_aligned_in_smp; + + struct { + struct mutex uring_lock; + wait_queue_head_t wait; + } ____cacheline_aligned_in_smp; + + struct { + spinlock_t completion_lock; + + /* + * ->poll_list is protected by the ctx->uring_lock for + * io_uring instances that don't use IORING_SETUP_SQPOLL. + * For SQPOLL, only the single threaded io_sq_thread() will + * manipulate the list, hence no extra locking is needed there. 
+ */ + struct list_head poll_list; + struct hlist_head *cancel_hash; + unsigned cancel_hash_bits; + bool poll_multi_file; + + spinlock_t inflight_lock; + struct list_head inflight_list; + } ____cacheline_aligned_in_smp; + + struct delayed_work file_put_work; + struct llist_head file_put_llist; + + struct work_struct exit_work; +}; + +/* + * First field must be the file pointer in all the + * iocb unions! See also 'struct kiocb' in + */ +struct io_poll_iocb { + struct file *file; + union { + struct wait_queue_head *head; + u64 addr; + }; + __poll_t events; + bool done; + bool canceled; + struct wait_queue_entry wait; +}; + +struct io_close { + struct file *file; + struct file *put_file; + int fd; +}; + +struct io_timeout_data { + struct io_kiocb *req; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; +}; + +struct io_accept { + struct file *file; + struct sockaddr __user *addr; + int __user *addr_len; + int flags; + unsigned long nofile; +}; + +struct io_sync { + struct file *file; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +struct io_cancel { + struct file *file; + u64 addr; +}; + +struct io_timeout { + struct file *file; + u64 addr; + int flags; + u32 off; + u32 target_seq; +}; + +struct io_rw { + /* NOTE: kiocb has the file as the first member, so don't do it here */ + struct kiocb kiocb; + u64 addr; + u64 len; +}; + +struct io_connect { + struct file *file; + struct sockaddr __user *addr; + int addr_len; +}; + +struct io_sr_msg { + struct file *file; + union { + struct user_msghdr __user *msg; + void __user *buf; + }; + int msg_flags; + int bgid; + size_t len; + struct io_buffer *kbuf; +}; + +struct io_open { + struct file *file; + int dfd; + struct filename *filename; + struct open_how how; + unsigned long nofile; +}; + +struct io_files_update { + struct file *file; + u64 arg; + u32 nr_args; + u32 offset; +}; + +struct io_fadvise { + struct file *file; + u64 offset; + u32 len; + u32 advice; +}; + +struct io_madvise { + struct file *file; + u64 addr; + u32 len; + u32 advice; +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +}; + +struct io_splice { + struct file *file_out; + struct file *file_in; + loff_t off_out; + loff_t off_in; + u64 len; + unsigned int flags; +}; + +struct io_provide_buf { + struct file *file; + __u64 addr; + __s32 len; + __u32 bgid; + __u16 nbufs; + __u16 bid; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + const char __user *filename; + struct statx __user *buffer; +}; + +struct io_async_connect { + struct sockaddr_storage address; +}; + +struct io_async_msghdr { + struct iovec fast_iov[UIO_FASTIOV]; + struct iovec *iov; + struct sockaddr __user *uaddr; + struct msghdr msg; + struct sockaddr_storage addr; +}; + +struct io_async_rw { + struct iovec fast_iov[UIO_FASTIOV]; + struct iovec *iov; + ssize_t nr_segs; + ssize_t size; + struct wait_page_queue wpq; + struct callback_head task_work; +}; + +struct io_async_ctx { + union { + struct io_async_rw rw; + struct io_async_msghdr msg; + struct io_async_connect connect; + struct io_timeout_data timeout; + }; +}; + +enum { + REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT, + REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT, + REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT, + REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT, + REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, + REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, + + REQ_F_LINK_HEAD_BIT, + REQ_F_FAIL_LINK_BIT, + REQ_F_INFLIGHT_BIT, + 
REQ_F_CUR_POS_BIT, + REQ_F_NOWAIT_BIT, + REQ_F_LINK_TIMEOUT_BIT, + REQ_F_ISREG_BIT, + REQ_F_COMP_LOCKED_BIT, + REQ_F_NEED_CLEANUP_BIT, + REQ_F_OVERFLOW_BIT, + REQ_F_POLLED_BIT, + REQ_F_BUFFER_SELECTED_BIT, + REQ_F_NO_FILE_TABLE_BIT, + REQ_F_WORK_INITIALIZED_BIT, + REQ_F_TASK_PINNED_BIT, + + /* not a real bit, just to check we're not overflowing the space */ + __REQ_F_LAST_BIT, +}; + +enum { + /* ctx owns file */ + REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT), + /* drain existing IO first */ + REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT), + /* linked sqes */ + REQ_F_LINK = BIT(REQ_F_LINK_BIT), + /* doesn't sever on completion < 0 */ + REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT), + /* IOSQE_ASYNC */ + REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT), + /* IOSQE_BUFFER_SELECT */ + REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT), + + /* head of a link */ + REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT), + /* fail rest of links */ + REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT), + /* on inflight list */ + REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), + /* read/write uses file position */ + REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), + /* must not punt to workers */ + REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), + /* has linked timeout */ + REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), + /* regular file */ + REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), + /* completion under lock */ + REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT), + /* needs cleanup */ + REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), + /* in overflow list */ + REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT), + /* already went through poll handler */ + REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), + /* buffer already selected */ + REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), + /* doesn't need file table for this request */ + REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT), + /* io_wq_work is initialized */ + REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT), + /* req->task is refcounted */ + REQ_F_TASK_PINNED = BIT(REQ_F_TASK_PINNED_BIT), +}; + +struct async_poll { + struct io_poll_iocb poll; + struct io_wq_work work; +}; + +/* + * NOTE! Each of the iocb union members has the file pointer + * as the first entry in their struct definition. So you can + * access the file pointer through any of the sub-structs, + * or directly as just 'ki_filp' in this struct. + */ +struct io_kiocb { + union { + struct file *file; + struct io_rw rw; + struct io_poll_iocb poll; + struct io_accept accept; + struct io_sync sync; + struct io_cancel cancel; + struct io_timeout timeout; + struct io_connect connect; + struct io_sr_msg sr_msg; + struct io_open open; + struct io_close close; + struct io_files_update files_update; + struct io_fadvise fadvise; + struct io_madvise madvise; + struct io_epoll epoll; + struct io_splice splice; + struct io_provide_buf pbuf; + struct io_statx statx; + }; + + struct io_async_ctx *io; + int cflags; + u8 opcode; + /* polled IO has completed */ + u8 iopoll_completed; + + u16 buf_index; + + struct io_ring_ctx *ctx; + struct list_head list; + unsigned int flags; + refcount_t refs; + struct task_struct *task; + unsigned long fsize; + u64 user_data; + u32 result; + u32 sequence; + + struct list_head link_list; + + struct list_head inflight_entry; + + struct percpu_ref *fixed_file_refs; + + union { + /* + * Only commands that never go async can use the below fields, + * obviously. Right now only IORING_OP_POLL_ADD uses them, and + * async armed poll handlers for regular commands. The latter + * restore the work, if needed. 
+ */ + struct { + struct hlist_node hash_node; + struct async_poll *apoll; + }; + struct io_wq_work work; + }; + struct callback_head task_work; +}; + +#define IO_IOPOLL_BATCH 8 + +struct io_comp_state { + unsigned int nr; + struct list_head list; + struct io_ring_ctx *ctx; +}; + +struct io_submit_state { + struct blk_plug plug; + + /* + * io_kiocb alloc cache + */ + void *reqs[IO_IOPOLL_BATCH]; + unsigned int free_reqs; + + /* + * Batch completion logic + */ + struct io_comp_state comp; + + /* + * File reference cache + */ + struct file *file; + unsigned int fd; + unsigned int has_refs; + unsigned int used_refs; + unsigned int ios_left; +}; + +struct io_op_def { + /* needs req->io allocated for deferral/async */ + unsigned async_ctx : 1; + /* needs current->mm setup, does mm access */ + unsigned needs_mm : 1; + /* needs req->file assigned */ + unsigned needs_file : 1; + /* don't fail if file grab fails */ + unsigned needs_file_no_error : 1; + /* hash wq insertion if file is a regular file */ + unsigned hash_reg_file : 1; + /* unbound wq insertion if file is a non-regular file */ + unsigned unbound_nonreg_file : 1; + /* opcode is not supported by this kernel */ + unsigned not_supported : 1; + /* needs file table */ + unsigned file_table : 1; + /* needs ->fs */ + unsigned needs_fs : 1; + /* set if opcode supports polled "wait" */ + unsigned pollin : 1; + unsigned pollout : 1; + /* op supports buffer selection */ + unsigned buffer_select : 1; +}; + +static const struct io_op_def io_op_defs[] = { + [IORING_OP_NOP] = {}, + [IORING_OP_READV] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollin = 1, + .buffer_select = 1, + }, + [IORING_OP_WRITEV] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_FSYNC] = { + .needs_file = 1, + }, + [IORING_OP_READ_FIXED] = { + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollin = 1, + }, + [IORING_OP_WRITE_FIXED] = { + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_POLL_ADD] = { + .needs_file = 1, + .unbound_nonreg_file = 1, + }, + [IORING_OP_POLL_REMOVE] = {}, + [IORING_OP_SYNC_FILE_RANGE] = { + .needs_file = 1, + }, + [IORING_OP_SENDMSG] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .needs_fs = 1, + .pollout = 1, + }, + [IORING_OP_RECVMSG] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .needs_fs = 1, + .pollin = 1, + .buffer_select = 1, + }, + [IORING_OP_TIMEOUT] = { + .async_ctx = 1, + .needs_mm = 1, + }, + [IORING_OP_TIMEOUT_REMOVE] = {}, + [IORING_OP_ACCEPT] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .file_table = 1, + .pollin = 1, + }, + [IORING_OP_ASYNC_CANCEL] = {}, + [IORING_OP_LINK_TIMEOUT] = { + .async_ctx = 1, + .needs_mm = 1, + }, + [IORING_OP_CONNECT] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_FALLOCATE] = { + .needs_file = 1, + }, + [IORING_OP_OPENAT] = { + .file_table = 1, + .needs_fs = 1, + }, + [IORING_OP_CLOSE] = { + .needs_file = 1, + .needs_file_no_error = 1, + .file_table = 1, + }, + [IORING_OP_FILES_UPDATE] = { + .needs_mm = 1, + .file_table = 1, + }, + [IORING_OP_STATX] = { + .needs_mm = 1, + .needs_fs = 1, + .file_table = 1, + }, + [IORING_OP_READ] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollin = 1, + .buffer_select = 1, + }, + 
[IORING_OP_WRITE] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_FADVISE] = { + .needs_file = 1, + }, + [IORING_OP_MADVISE] = { + .needs_mm = 1, + }, + [IORING_OP_SEND] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_RECV] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollin = 1, + .buffer_select = 1, + }, + [IORING_OP_OPENAT2] = { + .file_table = 1, + .needs_fs = 1, + }, + [IORING_OP_EPOLL_CTL] = { + .unbound_nonreg_file = 1, + .file_table = 1, + }, + [IORING_OP_SPLICE] = { + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + }, + [IORING_OP_PROVIDE_BUFFERS] = {}, + [IORING_OP_REMOVE_BUFFERS] = {}, + [IORING_OP_TEE] = { + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + }, +}; + +enum io_mem_account { + ACCT_LOCKED, + ACCT_PINNED, +}; + +static bool io_rw_reissue(struct io_kiocb *req, long res); +static void io_cqring_fill_event(struct io_kiocb *req, long res); +static void io_put_req(struct io_kiocb *req); +static void io_double_put_req(struct io_kiocb *req); +static void __io_double_put_req(struct io_kiocb *req); +static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req); +static void io_queue_linked_timeout(struct io_kiocb *req); +static int __io_sqe_files_update(struct io_ring_ctx *ctx, + struct io_uring_files_update *ip, + unsigned nr_args); +static int io_grab_files(struct io_kiocb *req); +static void io_complete_rw_common(struct kiocb *kiocb, long res, + struct io_comp_state *cs); +static void io_cleanup_req(struct io_kiocb *req); +static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, + int fd, struct file **out_file, bool fixed); +static void __io_queue_sqe(struct io_kiocb *req, + const struct io_uring_sqe *sqe, + struct io_comp_state *cs); + +static ssize_t io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct iov_iter *iter, + bool needs_lock); +static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size, + struct iovec *iovec, struct iovec *fast_iov, + struct iov_iter *iter); + +static struct kmem_cache *req_cachep; + +static const struct file_operations io_uring_fops; + +struct sock *io_uring_get_socket(struct file *file) +{ +#if defined(CONFIG_UNIX) + if (file->f_op == &io_uring_fops) { + struct io_ring_ctx *ctx = file->private_data; + + return ctx->ring_sock->sk; + } +#endif + return NULL; +} +EXPORT_SYMBOL(io_uring_get_socket); + +/* + * Must only be used if we don't need to care about links, usually from + * within the completion handling itself. 
+ */ +static void __io_double_put_req(struct io_kiocb *req) +{ + /* drop both submit and complete references */ + if (refcount_sub_and_test(2, &req->refs)) + __io_free_req(req); +} + +static void io_double_put_req(struct io_kiocb *req) +{ + /* drop both submit and complete references */ + if (refcount_sub_and_test(2, &req->refs)) + io_free_req(req); +} + +static inline void req_set_fail_links(struct io_kiocb *req) +{ + if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; +} + +static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx) +{ + if (!ctx->cq_ev_fd) + return false; + if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) + return false; + if (!ctx->eventfd_async) + return true; + return io_wq_current_is_worker(); +} + +static void io_cqring_ev_posted(struct io_ring_ctx *ctx) +{ + if (waitqueue_active(&ctx->wait)) + wake_up(&ctx->wait); + if (waitqueue_active(&ctx->sqo_wait)) + wake_up(&ctx->sqo_wait); + if (io_should_trigger_evfd(ctx)) + eventfd_signal(ctx->cq_ev_fd, 1); +} + +static void __io_queue_async_work(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *link = io_prep_linked_timeout(req); + + trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, + &req->work, req->flags); + io_wq_enqueue(ctx->io_wq, &req->work); + + if (link) + io_queue_linked_timeout(link); +} + +static void __io_queue_deferred(struct io_ring_ctx *ctx) +{ + do { + struct io_kiocb *req = list_first_entry(&ctx->defer_list, + struct io_kiocb, list); + + if (req_need_defer(req)) + break; + list_del_init(&req->list); + /* punt-init is done before queueing for defer */ + __io_queue_async_work(req); + } while (!list_empty(&ctx->defer_list)); +} + +static inline bool io_is_timeout_noseq(struct io_kiocb *req) +{ + return !req->timeout.off; +} + +static void __io_commit_cqring(struct io_ring_ctx *ctx) +{ + struct io_rings *rings = ctx->rings; + + /* order cqe stores with ring update */ + smp_store_release(&rings->cq.tail, ctx->cached_cq_tail); + + if (wq_has_sleeper(&ctx->cq_wait)) { + wake_up_interruptible(&ctx->cq_wait); + kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN); + } +} + +static void io_flush_timeouts(struct io_ring_ctx *ctx) +{ + while (!list_empty(&ctx->timeout_list)) { + struct io_kiocb *req = list_first_entry(&ctx->timeout_list, + struct io_kiocb, list); + + if (io_is_timeout_noseq(req)) + break; + if (req->timeout.target_seq != ctx->cached_cq_tail + - atomic_read(&ctx->cq_timeouts)) + break; + + list_del_init(&req->list); + io_kill_timeout(req); + } +} + +static void io_commit_cqring(struct io_ring_ctx *ctx) +{ + io_flush_timeouts(ctx); + __io_commit_cqring(ctx); + + if (unlikely(!list_empty(&ctx->defer_list))) + __io_queue_deferred(ctx); +} + +static void __io_req_task_cancel(struct io_kiocb *req, int error) +{ + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->completion_lock); + io_cqring_fill_event(req, error); + io_commit_cqring(ctx); + spin_unlock_irq(&ctx->completion_lock); + + io_cqring_ev_posted(ctx); + req_set_fail_links(req); + io_double_put_req(req); +} + +static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) +{ + if (!current->mm) { + if (unlikely(!ctx->sqo_mm || !mmget_not_zero(ctx->sqo_mm))) + return -EFAULT; + kthread_use_mm(ctx->sqo_mm); + } + + return 0; +} + +static void __io_req_task_submit(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!__io_sq_thread_acquire_mm(ctx)) { + mutex_lock(&ctx->uring_lock); + __io_queue_sqe(req, NULL, 
NULL); + mutex_unlock(&ctx->uring_lock); + } else { + __io_req_task_cancel(req, -EFAULT); + } +} + +static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) + __acquires(&req->ctx->completion_lock) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!req->result && !READ_ONCE(poll->canceled)) { + struct poll_table_struct pt = { ._key = poll->events }; + + req->result = vfs_poll(req->file, &pt) & poll->events; + } + + spin_lock_irq(&ctx->completion_lock); + if (!req->result && !READ_ONCE(poll->canceled)) { + add_wait_queue(poll->head, &poll->wait); + return true; + } + + return false; +} + +static void io_async_task_func(struct callback_head *cb) +{ + struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + struct async_poll *apoll = req->apoll; + struct io_ring_ctx *ctx = req->ctx; + + trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); + + if (io_poll_rewait(req, &apoll->poll)) { + spin_unlock_irq(&ctx->completion_lock); + return; + } + + /* If req is still hashed, it cannot have been canceled. Don't check. */ + if (hash_hashed(&req->hash_node)) + hash_del(&req->hash_node); + + spin_unlock_irq(&ctx->completion_lock); + + /* restore ->work in case we need to retry again */ + if (req->flags & REQ_F_WORK_INITIALIZED) + memcpy(&req->work, &apoll->work, sizeof(req->work)); + + if (!READ_ONCE(apoll->poll.canceled)) + __io_req_task_submit(req); + else + __io_req_task_cancel(req, -ECANCELED); + + kfree(apoll); +} diff --git a/test/aa340845ae6.i b/test/aa340845ae6.i new file mode 100644 index 0000000..f2f80e5 --- /dev/null +++ b/test/aa340845ae6.i @@ -0,0 +1,105721 @@ +# 1 "fs/io_uring.c" +# 1 "" +# 1 "" +# 1 "././include/linux/kconfig.h" 1 + + + + + + +# 1 "./include/generated/autoconf.h" 1 +# 8 "././include/linux/kconfig.h" 2 +# 1 "" 2 +# 1 "././include/linux/compiler_types.h" 1 +# 59 "././include/linux/compiler_types.h" +# 1 "./include/linux/compiler_attributes.h" 1 +# 60 "././include/linux/compiler_types.h" 2 +# 68 "././include/linux/compiler_types.h" +# 1 "./include/linux/compiler-gcc.h" 1 +# 69 "././include/linux/compiler_types.h" 2 +# 85 "././include/linux/compiler_types.h" +struct ftrace_branch_data { + const char *func; + const char *file; + unsigned line; + union { + struct { + unsigned long correct; + unsigned long incorrect; + }; + struct { + unsigned long miss; + unsigned long hit; + }; + unsigned long miss_hit[2]; + }; +}; + +struct ftrace_likely_data { + struct ftrace_branch_data data; + unsigned long constant; +}; +# 1 "" 2 +# 1 "fs/io_uring.c" +# 42 "fs/io_uring.c" +# 1 "./include/linux/kernel.h" 1 + + + + + +# 1 "/usr/lib/gcc/x86_64-linux-gnu/9/include/stdarg.h" 1 3 4 +# 40 "/usr/lib/gcc/x86_64-linux-gnu/9/include/stdarg.h" 3 4 + +# 40 "/usr/lib/gcc/x86_64-linux-gnu/9/include/stdarg.h" 3 4 +typedef __builtin_va_list __gnuc_va_list; +# 99 "/usr/lib/gcc/x86_64-linux-gnu/9/include/stdarg.h" 3 4 +typedef __gnuc_va_list va_list; +# 7 "./include/linux/kernel.h" 2 +# 1 "./include/linux/limits.h" 1 + + + + +# 1 "./include/uapi/linux/limits.h" 1 +# 6 "./include/linux/limits.h" 2 +# 1 "./include/linux/types.h" 1 + + + + + +# 1 "./include/uapi/linux/types.h" 1 + + + + +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 1 "./include/uapi/asm-generic/types.h" 1 + + + + + + +# 1 "./include/asm-generic/int-ll64.h" 1 +# 11 "./include/asm-generic/int-ll64.h" +# 1 "./include/uapi/asm-generic/int-ll64.h" 1 +# 12 "./include/uapi/asm-generic/int-ll64.h" +# 1 "./arch/x86/include/uapi/asm/bitsperlong.h" 1 +# 11 "./arch/x86/include/uapi/asm/bitsperlong.h" 
+# 1 "./include/asm-generic/bitsperlong.h" 1 + + + + +# 1 "./include/uapi/asm-generic/bitsperlong.h" 1 +# 6 "./include/asm-generic/bitsperlong.h" 2 +# 12 "./arch/x86/include/uapi/asm/bitsperlong.h" 2 +# 13 "./include/uapi/asm-generic/int-ll64.h" 2 + + + + + + + + +# 20 "./include/uapi/asm-generic/int-ll64.h" +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + + +__extension__ typedef __signed__ long long __s64; +__extension__ typedef unsigned long long __u64; +# 12 "./include/asm-generic/int-ll64.h" 2 + + + + +typedef __s8 s8; +typedef __u8 u8; +typedef __s16 s16; +typedef __u16 u16; +typedef __s32 s32; +typedef __u32 u32; +typedef __s64 s64; +typedef __u64 u64; +# 8 "./include/uapi/asm-generic/types.h" 2 +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 2 +# 6 "./include/uapi/linux/types.h" 2 +# 14 "./include/uapi/linux/types.h" +# 1 "./include/uapi/linux/posix_types.h" 1 + + + + +# 1 "./include/linux/stddef.h" 1 + + + + +# 1 "./include/uapi/linux/stddef.h" 1 + +# 1 "./include/linux/compiler_types.h" 1 +# 3 "./include/uapi/linux/stddef.h" 2 +# 6 "./include/linux/stddef.h" 2 + + + + +enum { + false = 0, + true = 1 +}; +# 6 "./include/uapi/linux/posix_types.h" 2 +# 25 "./include/uapi/linux/posix_types.h" +typedef struct { + unsigned long fds_bits[1024 / (8 * sizeof(long))]; +} __kernel_fd_set; + + +typedef void (*__kernel_sighandler_t)(int); + + +typedef int __kernel_key_t; +typedef int __kernel_mqd_t; + +# 1 "./arch/x86/include/asm/posix_types.h" 1 + + + + +# 1 "./arch/x86/include/uapi/asm/posix_types_64.h" 1 +# 11 "./arch/x86/include/uapi/asm/posix_types_64.h" +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; + + +typedef unsigned long __kernel_old_dev_t; + + +# 1 "./include/uapi/asm-generic/posix_types.h" 1 +# 15 "./include/uapi/asm-generic/posix_types.h" +typedef long __kernel_long_t; +typedef unsigned long __kernel_ulong_t; + + + +typedef __kernel_ulong_t __kernel_ino_t; + + + +typedef unsigned int __kernel_mode_t; + + + +typedef int __kernel_pid_t; + + + +typedef int __kernel_ipc_pid_t; + + + +typedef unsigned int __kernel_uid_t; +typedef unsigned int __kernel_gid_t; + + + +typedef __kernel_long_t __kernel_suseconds_t; + + + +typedef int __kernel_daddr_t; + + + +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; +# 72 "./include/uapi/asm-generic/posix_types.h" +typedef __kernel_ulong_t __kernel_size_t; +typedef __kernel_long_t __kernel_ssize_t; +typedef __kernel_long_t __kernel_ptrdiff_t; + + + + +typedef struct { + int val[2]; +} __kernel_fsid_t; + + + + + +typedef __kernel_long_t __kernel_off_t; +typedef long long __kernel_loff_t; +typedef __kernel_long_t __kernel_old_time_t; + + + +typedef long long __kernel_time64_t; +typedef __kernel_long_t __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef char * __kernel_caddr_t; +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +# 19 "./arch/x86/include/uapi/asm/posix_types_64.h" 2 +# 6 "./arch/x86/include/asm/posix_types.h" 2 +# 37 "./include/uapi/linux/posix_types.h" 2 +# 15 "./include/uapi/linux/types.h" 2 +# 29 "./include/uapi/linux/types.h" +typedef __u16 __le16; +typedef __u16 __be16; +typedef __u32 __le32; +typedef __u32 __be32; +typedef __u64 __le64; +typedef __u64 __be64; + +typedef __u16 __sum16; +typedef __u32 __wsum; +# 52 "./include/uapi/linux/types.h" 
+typedef unsigned __poll_t; +# 7 "./include/linux/types.h" 2 + + + + + + +typedef u32 __kernel_dev_t; + +typedef __kernel_fd_set fd_set; +typedef __kernel_dev_t dev_t; +typedef __kernel_ino_t ino_t; +typedef __kernel_mode_t mode_t; +typedef unsigned short umode_t; +typedef u32 nlink_t; +typedef __kernel_off_t off_t; +typedef __kernel_pid_t pid_t; +typedef __kernel_daddr_t daddr_t; +typedef __kernel_key_t key_t; +typedef __kernel_suseconds_t suseconds_t; +typedef __kernel_timer_t timer_t; +typedef __kernel_clockid_t clockid_t; +typedef __kernel_mqd_t mqd_t; + +typedef _Bool bool; + +typedef __kernel_uid32_t uid_t; +typedef __kernel_gid32_t gid_t; +typedef __kernel_uid16_t uid16_t; +typedef __kernel_gid16_t gid16_t; + +typedef unsigned long uintptr_t; + + + +typedef __kernel_old_uid_t old_uid_t; +typedef __kernel_old_gid_t old_gid_t; + + + +typedef __kernel_loff_t loff_t; +# 55 "./include/linux/types.h" +typedef __kernel_size_t size_t; + + + + +typedef __kernel_ssize_t ssize_t; + + + + +typedef __kernel_ptrdiff_t ptrdiff_t; + + + + +typedef __kernel_clock_t clock_t; + + + + +typedef __kernel_caddr_t caddr_t; + + + +typedef unsigned char u_char; +typedef unsigned short u_short; +typedef unsigned int u_int; +typedef unsigned long u_long; + + +typedef unsigned char unchar; +typedef unsigned short ushort; +typedef unsigned int uint; +typedef unsigned long ulong; + + + + +typedef u8 u_int8_t; +typedef s8 int8_t; +typedef u16 u_int16_t; +typedef s16 int16_t; +typedef u32 u_int32_t; +typedef s32 int32_t; + + + +typedef u8 uint8_t; +typedef u16 uint16_t; +typedef u32 uint32_t; + + +typedef u64 uint64_t; +typedef u64 u_int64_t; +typedef s64 int64_t; +# 125 "./include/linux/types.h" +typedef u64 sector_t; +typedef u64 blkcnt_t; +# 143 "./include/linux/types.h" +typedef u64 dma_addr_t; + + + + +typedef unsigned int gfp_t; +typedef unsigned int slab_flags_t; +typedef unsigned int fmode_t; + + +typedef u64 phys_addr_t; + + + + +typedef phys_addr_t resource_size_t; + + + + + +typedef unsigned long irq_hw_number_t; + +typedef struct { + int counter; +} atomic_t; + + +typedef struct { + s64 counter; +} atomic64_t; + + +struct list_head { + struct list_head *next, *prev; +}; + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +struct ustat { + __kernel_daddr_t f_tfree; + __kernel_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; +# 214 "./include/linux/types.h" +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *head); +} __attribute__((aligned(sizeof(void *)))); + + +typedef void (*rcu_callback_t)(struct callback_head *head); +typedef void (*call_rcu_func_t)(struct callback_head *head, rcu_callback_t func); + +typedef void (*swap_func_t)(void *a, void *b, int size); + +typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv); +typedef int (*cmp_func_t)(const void *a, const void *b); +# 7 "./include/linux/limits.h" 2 +# 1 "./include/vdso/limits.h" 1 +# 8 "./include/linux/limits.h" 2 +# 8 "./include/linux/kernel.h" 2 +# 1 "./include/linux/linkage.h" 1 + + + + + +# 1 "./include/linux/stringify.h" 1 +# 7 "./include/linux/linkage.h" 2 +# 1 "./include/linux/export.h" 1 +# 43 "./include/linux/export.h" +# 1 "./include/linux/compiler.h" 1 +# 251 "./include/linux/compiler.h" +# 1 "./arch/x86/include/asm/barrier.h" 1 + + + + +# 1 "./arch/x86/include/asm/alternative.h" 1 +# 10 "./arch/x86/include/asm/alternative.h" +# 1 "./arch/x86/include/asm/asm.h" 1 +# 176 
"./arch/x86/include/asm/asm.h" +register unsigned long current_stack_pointer asm("rsp"); +# 11 "./arch/x86/include/asm/alternative.h" 2 +# 58 "./arch/x86/include/asm/alternative.h" +struct alt_instr { + s32 instr_offset; + s32 repl_offset; + u16 cpuid; + u8 instrlen; + u8 replacementlen; + u8 padlen; +} __attribute__((__packed__)); + + + + + +extern int alternatives_patched; + +extern void alternative_instructions(void); +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); + +struct module; + + +extern void alternatives_smp_module_add(struct module *mod, char *name, + void *locks, void *locks_end, + void *text, void *text_end); +extern void alternatives_smp_module_del(struct module *mod); +extern void alternatives_enable_smp(void); +extern int alternatives_text_reserved(void *start, void *end); +extern bool skip_smp_alternatives; +# 6 "./arch/x86/include/asm/barrier.h" 2 +# 1 "./arch/x86/include/asm/nops.h" 1 +# 143 "./arch/x86/include/asm/nops.h" +extern const unsigned char * const *ideal_nops; +extern void arch_init_ideal_nops(void); +# 7 "./arch/x86/include/asm/barrier.h" 2 +# 36 "./arch/x86/include/asm/barrier.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + asm volatile ("cmp %1,%2; sbb %0,%0;" + :"=r" (mask) + :"g"(size),"r" (index) + :"cc"); + return mask; +} +# 85 "./arch/x86/include/asm/barrier.h" +# 1 "./include/asm-generic/barrier.h" 1 +# 16 "./include/asm-generic/barrier.h" +# 1 "./include/linux/compiler.h" 1 +# 17 "./include/asm-generic/barrier.h" 2 +# 86 "./arch/x86/include/asm/barrier.h" 2 +# 252 "./include/linux/compiler.h" 2 +# 1 "./include/linux/kasan-checks.h" 1 +# 13 "./include/linux/kasan-checks.h" +bool __kasan_check_read(const volatile void *p, unsigned int size); +bool __kasan_check_write(const volatile void *p, unsigned int size); +# 253 "./include/linux/compiler.h" 2 +# 1 "./include/linux/kcsan-checks.h" 1 +# 148 "./include/linux/kcsan-checks.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void __kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_disable_current(void) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_enable_current(void) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_enable_current_nowarn(void) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_nestable_atomic_begin(void) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_nestable_atomic_end(void) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_flat_atomic_begin(void) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_flat_atomic_end(void) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void 
kcsan_atomic_next(int n) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_set_access_mask(unsigned long mask) { } + +struct kcsan_scoped_access { }; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa) { return sa; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { } +# 184 "./include/linux/kcsan-checks.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void __kcsan_enable_current(void) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void __kcsan_disable_current(void) { } +# 254 "./include/linux/compiler.h" 2 +# 306 "./include/linux/compiler.h" +static __attribute__((no_sanitize_address)) __attribute__((__no_instrument_function__)) __attribute__((__unused__)) +unsigned long __read_once_word_nocheck(const void *addr) +{ + return (*(const volatile typeof( _Generic((*(unsigned long *)addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*(unsigned long *)addr))) *)&(*(unsigned long *)addr)); +} +# 327 "./include/linux/compiler.h" +static __attribute__((no_sanitize_address)) __attribute__((__no_instrument_function__)) __attribute__((__unused__)) +unsigned long read_word_at_a_time(const void *addr) +{ + __kasan_check_read(addr, 1); + return *(unsigned long *)addr; +} +# 350 "./include/linux/compiler.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((__no_instrument_function__)) void *offset_to_ptr(const int *off) +{ + return (void *)((unsigned long)off + *off); +} +# 44 "./include/linux/export.h" 2 +# 60 "./include/linux/export.h" +struct kernel_symbol { + int value_offset; + int name_offset; + int namespace_offset; +}; +# 8 "./include/linux/linkage.h" 2 +# 1 "./arch/x86/include/asm/linkage.h" 1 +# 9 "./include/linux/linkage.h" 2 +# 9 "./include/linux/kernel.h" 2 + + + +# 1 "./include/linux/bitops.h" 1 + + + +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 5 "./include/linux/bitops.h" 2 +# 1 "./include/linux/bits.h" 1 + + + + +# 1 "./include/linux/const.h" 1 + + + +# 1 "./include/vdso/const.h" 1 + + + + +# 1 "./include/uapi/linux/const.h" 1 +# 6 "./include/vdso/const.h" 2 +# 5 "./include/linux/const.h" 2 +# 6 "./include/linux/bits.h" 2 +# 1 "./include/vdso/bits.h" 1 +# 7 "./include/linux/bits.h" 2 +# 23 "./include/linux/bits.h" +# 1 "./include/linux/build_bug.h" 1 +# 24 "./include/linux/bits.h" 2 +# 6 "./include/linux/bitops.h" 2 +# 20 "./include/linux/bitops.h" +extern unsigned int __sw_hweight8(unsigned int w); +extern unsigned int __sw_hweight16(unsigned int w); +extern unsigned int 
__sw_hweight32(unsigned int w); +extern unsigned long __sw_hweight64(__u64 w); + + + + + +# 1 "./arch/x86/include/asm/bitops.h" 1 +# 18 "./arch/x86/include/asm/bitops.h" +# 1 "./arch/x86/include/asm/rmwcc.h" 1 +# 19 "./arch/x86/include/asm/bitops.h" 2 +# 51 "./arch/x86/include/asm/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch_set_bit(long nr, volatile unsigned long *addr) +{ + if (__builtin_constant_p(nr)) { + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "orb %b1,%0" + : "+m" (*(volatile char *) ((void *)(addr) + ((nr)>>3))) + : "iq" ((1 << ((nr) & 7))) + : "memory"); + } else { + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btsq" " " " %1,%0" + : : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch___set_bit(long nr, volatile unsigned long *addr) +{ + asm volatile(" " "btsq" " " " %1,%0" : : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch_clear_bit(long nr, volatile unsigned long *addr) +{ + if (__builtin_constant_p(nr)) { + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "andb %b1,%0" + : "+m" (*(volatile char *) ((void *)(addr) + ((nr)>>3))) + : "iq" (~(1 << ((nr) & 7)))); + } else { + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btrq" " " " %1,%0" + : : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch_clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + __asm__ __volatile__("": : :"memory"); + arch_clear_bit(nr, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch___clear_bit(long nr, volatile unsigned long *addr) +{ + asm volatile(" " "btrq" " " " %1,%0" : : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +{ + bool negative; + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "andb %2,%1" + "\n\t/* output condition code " "s" "*/\n" + : "=@cc" "s" (negative), "+m" (*(volatile char *) (addr)) + : "ir" ((char) ~(1 << nr)) : "memory"); + return negative; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch___clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + arch___clear_bit(nr, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
__attribute__((__always_inline__)) void +arch___change_bit(long nr, volatile unsigned long *addr) +{ + asm volatile(" " "btcq" " " " %1,%0" : : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch_change_bit(long nr, volatile unsigned long *addr) +{ + if (__builtin_constant_p(nr)) { + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xorb %b1,%0" + : "+m" (*(volatile char *) ((void *)(addr) + ((nr)>>3))) + : "iq" ((1 << ((nr) & 7)))); + } else { + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btcq" " " " %1,%0" + : : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_test_and_set_bit(long nr, volatile unsigned long *addr) +{ + return ({ bool c; asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btsq" " " " %[val], " "%[var]" "\n\t/* output condition code " "c" "*/\n" : [var] "+m" (*addr), "=@cc" "c" (c) : [val] "Ir" (nr) : "memory"); c; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr) +{ + return arch_test_and_set_bit(nr, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch___test_and_set_bit(long nr, volatile unsigned long *addr) +{ + bool oldbit; + + asm(" " "btsq" " " " %2,%1" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (oldbit) + : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); + return oldbit; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + return ({ bool c; asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btrq" " " " %[val], " "%[var]" "\n\t/* output condition code " "c" "*/\n" : [var] "+m" (*addr), "=@cc" "c" (c) : [val] "Ir" (nr) : "memory"); c; }); +} +# 173 "./arch/x86/include/asm/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch___test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + bool oldbit; + + asm volatile(" " "btrq" " " " %2,%1" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (oldbit) + : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); + return oldbit; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch___test_and_change_bit(long nr, volatile unsigned long *addr) +{ + bool oldbit; + + asm volatile(" " "btcq" " " " %2,%1" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (oldbit) + : "m" (*(volatile long *) (addr)), "Ir" (nr) : "memory"); + + return oldbit; +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_test_and_change_bit(long nr, volatile unsigned long *addr) +{ + return ({ bool c; asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " " " "btcq" " " " %[val], " "%[var]" "\n\t/* output condition code " "c" "*/\n" : [var] "+m" (*addr), "=@cc" "c" (c) : [val] "Ir" (nr) : "memory"); c; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool constant_test_bit(long nr, const volatile unsigned long *addr) +{ + + + + + return ((1UL << (nr & (64 -1))) & + (addr[nr >> 6])) != 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool variable_test_bit(long nr, volatile const unsigned long *addr) +{ + bool oldbit; + + asm volatile(" " "btq" " " " %2,%1" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (oldbit) + : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); + + return oldbit; +} +# 237 "./arch/x86/include/asm/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __ffs(unsigned long word) +{ + asm("rep; bsf %1,%0" + : "=r" (word) + : "rm" (word)); + return word; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long ffz(unsigned long word) +{ + asm("rep; bsf %1,%0" + : "=r" (word) + : "r" (~word)); + return word; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __fls(unsigned long word) +{ + asm("bsr %1,%0" + : "=r" (word) + : "rm" (word)); + return word; +} +# 287 "./arch/x86/include/asm/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int ffs(int x) +{ + int r; +# 301 "./arch/x86/include/asm/bitops.h" + asm("bsfl %1,%0" + : "=r" (r) + : "rm" (x), "0" (-1)); +# 314 "./arch/x86/include/asm/bitops.h" + return r + 1; +} +# 328 "./arch/x86/include/asm/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int fls(unsigned int x) +{ + int r; +# 342 "./arch/x86/include/asm/bitops.h" + asm("bsrl %1,%0" + : "=r" (r) + : "rm" (x), "0" (-1)); +# 355 "./arch/x86/include/asm/bitops.h" + return r + 1; +} +# 370 "./arch/x86/include/asm/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int fls64(__u64 x) +{ + int bitpos = -1; + + + + + + asm("bsrq %1,%q0" + : "+r" (bitpos) + : "rm" (x)); + return bitpos + 1; +} + + + + +# 1 "./include/asm-generic/bitops/find.h" 1 +# 15 "./include/asm-generic/bitops/find.h" +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long + size, unsigned long offset); +# 30 "./include/asm-generic/bitops/find.h" +extern unsigned long find_next_and_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long size, + unsigned long 
offset); +# 45 "./include/asm-generic/bitops/find.h" +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); +# 59 "./include/asm-generic/bitops/find.h" +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); +# 70 "./include/asm-generic/bitops/find.h" +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); +# 93 "./include/asm-generic/bitops/find.h" +extern unsigned long find_next_clump8(unsigned long *clump, + const unsigned long *addr, + unsigned long size, unsigned long offset); +# 388 "./arch/x86/include/asm/bitops.h" 2 + +# 1 "./include/asm-generic/bitops/sched.h" 1 + + + + + +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 7 "./include/asm-generic/bitops/sched.h" 2 + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sched_find_first_bit(const unsigned long *b) +{ + + if (b[0]) + return __ffs(b[0]); + return __ffs(b[1]) + 64; +# 30 "./include/asm-generic/bitops/sched.h" +} +# 390 "./arch/x86/include/asm/bitops.h" 2 + +# 1 "./arch/x86/include/asm/arch_hweight.h" 1 + + + + +# 1 "./arch/x86/include/asm/cpufeatures.h" 1 + + + + + +# 1 "./arch/x86/include/asm/required-features.h" 1 +# 7 "./arch/x86/include/asm/cpufeatures.h" 2 + + + +# 1 "./arch/x86/include/asm/disabled-features.h" 1 +# 11 "./arch/x86/include/asm/cpufeatures.h" 2 +# 6 "./arch/x86/include/asm/arch_hweight.h" 2 +# 15 "./arch/x86/include/asm/arch_hweight.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned int __arch_hweight32(unsigned int w) +{ + unsigned int res; + + asm ("# ALT: oldnstr\n" "661:\n\t" "call __sw_hweight32" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 4*32+23)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "popcntl %1, %0" "\n" "665""1" ":\n" ".popsection\n" + : "=""a" (res) + : "D" (w)); + + return res; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight32(w & 0xffff); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight32(w & 0xff); +} +# 43 "./arch/x86/include/asm/arch_hweight.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __arch_hweight64(__u64 w) +{ + unsigned long res; + + asm ("# ALT: oldnstr\n" "661:\n\t" "call __sw_hweight64" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 4*32+23)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" 
".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "popcntq %1, %0" "\n" "665""1" ":\n" ".popsection\n" + : "=""a" (res) + : "D" (w)); + + return res; +} +# 392 "./arch/x86/include/asm/bitops.h" 2 + +# 1 "./include/asm-generic/bitops/const_hweight.h" 1 +# 394 "./arch/x86/include/asm/bitops.h" 2 + +# 1 "./include/asm-generic/bitops/instrumented-atomic.h" 1 +# 14 "./include/asm-generic/bitops/instrumented-atomic.h" +# 1 "./include/linux/instrumented.h" 1 +# 24 "./include/linux/instrumented.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void instrument_read(const volatile void *v, size_t size) +{ + __kasan_check_read(v, size); + kcsan_check_access(v, size, 0); +} +# 39 "./include/linux/instrumented.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void instrument_write(const volatile void *v, size_t size) +{ + __kasan_check_write(v, size); + kcsan_check_access(v, size, 0x1); +} +# 54 "./include/linux/instrumented.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void instrument_atomic_read(const volatile void *v, size_t size) +{ + __kasan_check_read(v, size); + kcsan_check_access(v, size, 0x2); +} +# 69 "./include/linux/instrumented.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void instrument_atomic_write(const volatile void *v, size_t size) +{ + __kasan_check_write(v, size); + kcsan_check_access(v, size, 0x2 | 0x1); +} +# 85 "./include/linux/instrumented.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +instrument_copy_to_user(void *to, const void *from, unsigned long n) +{ + __kasan_check_read(from, n); + kcsan_check_access(from, n, 0); +} +# 102 "./include/linux/instrumented.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +instrument_copy_from_user(const void *to, const void *from, unsigned long n) +{ + __kasan_check_write(to, n); + kcsan_check_access(to, n, 0x1); +} +# 15 "./include/asm-generic/bitops/instrumented-atomic.h" 2 +# 26 "./include/asm-generic/bitops/instrumented-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_bit(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + arch_set_bit(nr, addr); +} +# 39 "./include/asm-generic/bitops/instrumented-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_bit(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + arch_clear_bit(nr, addr); +} +# 55 "./include/asm-generic/bitops/instrumented-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void change_bit(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + arch_change_bit(nr, addr); +} +# 68 
"./include/asm-generic/bitops/instrumented-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool test_and_set_bit(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + return arch_test_and_set_bit(nr, addr); +} +# 81 "./include/asm-generic/bitops/instrumented-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + return arch_test_and_clear_bit(nr, addr); +} +# 94 "./include/asm-generic/bitops/instrumented-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool test_and_change_bit(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + return arch_test_and_change_bit(nr, addr); +} +# 396 "./arch/x86/include/asm/bitops.h" 2 +# 1 "./include/asm-generic/bitops/instrumented-non-atomic.h" 1 +# 25 "./include/asm-generic/bitops/instrumented-non-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __set_bit(long nr, volatile unsigned long *addr) +{ + instrument_write(addr + ((nr) / 64), sizeof(long)); + arch___set_bit(nr, addr); +} +# 40 "./include/asm-generic/bitops/instrumented-non-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __clear_bit(long nr, volatile unsigned long *addr) +{ + instrument_write(addr + ((nr) / 64), sizeof(long)); + arch___clear_bit(nr, addr); +} +# 55 "./include/asm-generic/bitops/instrumented-non-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __change_bit(long nr, volatile unsigned long *addr) +{ + instrument_write(addr + ((nr) / 64), sizeof(long)); + arch___change_bit(nr, addr); +} +# 69 "./include/asm-generic/bitops/instrumented-non-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __test_and_set_bit(long nr, volatile unsigned long *addr) +{ + instrument_write(addr + ((nr) / 64), sizeof(long)); + return arch___test_and_set_bit(nr, addr); +} +# 83 "./include/asm-generic/bitops/instrumented-non-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + instrument_write(addr + ((nr) / 64), sizeof(long)); + return arch___test_and_clear_bit(nr, addr); +} +# 97 "./include/asm-generic/bitops/instrumented-non-atomic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __test_and_change_bit(long nr, volatile unsigned long *addr) +{ + instrument_write(addr + ((nr) / 64), sizeof(long)); + return arch___test_and_change_bit(nr, addr); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool test_bit(long nr, const volatile unsigned long *addr) +{ + instrument_atomic_read(addr + ((nr) / 64), sizeof(long)); + return (__builtin_constant_p((nr)) ? 
constant_test_bit((nr), (addr)) : variable_test_bit((nr), (addr))); +} +# 397 "./arch/x86/include/asm/bitops.h" 2 +# 1 "./include/asm-generic/bitops/instrumented-lock.h" 1 +# 23 "./include/asm-generic/bitops/instrumented-lock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + arch_clear_bit_unlock(nr, addr); +} +# 38 "./include/asm-generic/bitops/instrumented-lock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + instrument_write(addr + ((nr) / 64), sizeof(long)); + arch___clear_bit_unlock(nr, addr); +} +# 53 "./include/asm-generic/bitops/instrumented-lock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + return arch_test_and_set_bit_lock(nr, addr); +} +# 71 "./include/asm-generic/bitops/instrumented-lock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +{ + instrument_atomic_write(addr + ((nr) / 64), sizeof(long)); + return arch_clear_bit_unlock_is_negative_byte(nr, addr); +} +# 398 "./arch/x86/include/asm/bitops.h" 2 + +# 1 "./include/asm-generic/bitops/le.h" 1 + + + + +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 6 "./include/asm-generic/bitops/le.h" 2 +# 1 "./arch/x86/include/uapi/asm/byteorder.h" 1 + + + + +# 1 "./include/linux/byteorder/little_endian.h" 1 + + + + +# 1 "./include/uapi/linux/byteorder/little_endian.h" 1 +# 13 "./include/uapi/linux/byteorder/little_endian.h" +# 1 "./include/linux/swab.h" 1 + + + + +# 1 "./include/uapi/linux/swab.h" 1 + + + + + + + +# 1 "./arch/x86/include/uapi/asm/swab.h" 1 + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u32 __arch_swab32(__u32 val) +{ + asm("bswapl %0" : "=r" (val) : "0" (val)); + return val; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u64 __arch_swab64(__u64 val) +{ +# 31 "./arch/x86/include/uapi/asm/swab.h" + asm("bswapq %0" : "=r" (val) : "0" (val)); + return val; + +} +# 9 "./include/uapi/linux/swab.h" 2 +# 48 "./include/uapi/linux/swab.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u16 __fswab16(__u16 val) +{ + + + + return ((__u16)( (((__u16)(val) & (__u16)0x00ffU) << 8) | (((__u16)(val) & (__u16)0xff00U) >> 8))); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u32 __fswab32(__u32 val) +{ + + return __arch_swab32(val); + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u64 __fswab64(__u64 val) +{ + + return __arch_swab64(val); + + + + + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) __attribute__((__const__)) __u32 __fswahw32(__u32 val) +{ + + + + return ((__u32)( (((__u32)(val) & (__u32)0x0000ffffUL) << 16) | (((__u32)(val) & (__u32)0xffff0000UL) >> 16))); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) __u32 __fswahb32(__u32 val) +{ + + + + return ((__u32)( (((__u32)(val) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(val) & (__u32)0xff00ff00UL) >> 8))); + +} +# 136 "./include/uapi/linux/swab.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __swab(const unsigned long y) +{ + + return (__u64)__builtin_bswap64((__u64)(y)); + + + +} +# 171 "./include/uapi/linux/swab.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u16 __swab16p(const __u16 *p) +{ + + + + return (__u16)__builtin_bswap16((__u16)(*p)); + +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u32 __swab32p(const __u32 *p) +{ + + + + return (__u32)__builtin_bswap32((__u32)(*p)); + +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u64 __swab64p(const __u64 *p) +{ + + + + return (__u64)__builtin_bswap64((__u64)(*p)); + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 __swahw32p(const __u32 *p) +{ + + + + return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p)); + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 __swahb32p(const __u32 *p) +{ + + + + return (__builtin_constant_p((__u32)(*p)) ? 
((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p)); + +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __swab16s(__u16 *p) +{ + + + + *p = __swab16p(p); + +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __swab32s(__u32 *p) +{ + + + + *p = __swab32p(p); + +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __swab64s(__u64 *p) +{ + + + + *p = __swab64p(p); + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __swahw32s(__u32 *p) +{ + + + + *p = __swahw32p(p); + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __swahb32s(__u32 *p) +{ + + + + *p = __swahb32p(p); + +} +# 6 "./include/linux/swab.h" 2 +# 14 "./include/uapi/linux/byteorder/little_endian.h" 2 +# 44 "./include/uapi/linux/byteorder/little_endian.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __le64 __cpu_to_le64p(const __u64 *p) +{ + return ( __le64)*p; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u64 __le64_to_cpup(const __le64 *p) +{ + return ( __u64)*p; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __le32 __cpu_to_le32p(const __u32 *p) +{ + return ( __le32)*p; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u32 __le32_to_cpup(const __le32 *p) +{ + return ( __u32)*p; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __le16 __cpu_to_le16p(const __u16 *p) +{ + return ( __le16)*p; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u16 __le16_to_cpup(const __le16 *p) +{ + return ( __u16)*p; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __be64 __cpu_to_be64p(const __u64 *p) +{ + return ( __be64)__swab64p(p); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u64 __be64_to_cpup(const __be64 *p) +{ + return __swab64p((__u64 *)p); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __be32 __cpu_to_be32p(const __u32 *p) +{ + return ( __be32)__swab32p(p); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u32 __be32_to_cpup(const __be32 *p) +{ + return __swab32p((__u32 *)p); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) __attribute__((__always_inline__)) __be16 __cpu_to_be16p(const __u16 *p) +{ + return ( __be16)__swab16p(p); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __u16 __be16_to_cpup(const __be16 *p) +{ + return __swab16p((__u16 *)p); +} +# 6 "./include/linux/byteorder/little_endian.h" 2 + + + + + +# 1 "./include/linux/byteorder/generic.h" 1 +# 144 "./include/linux/byteorder/generic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void le16_add_cpu(__le16 *var, u16 val) +{ + *var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void le32_add_cpu(__le32 *var, u32 val) +{ + *var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void le64_add_cpu(__le64 *var, u64 val) +{ + *var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + do { (void)(buf); } while (0); + buf++; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + do { (void)(buf); } while (0); + buf++; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void be16_add_cpu(__be16 *var, u16 val) +{ + *var = (( __be16)(__u16)__builtin_bswap16((__u16)(((__u16)__builtin_bswap16((__u16)(( __u16)(__be16)(*var))) + val)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void be32_add_cpu(__be32 *var, u32 val) +{ + *var = (( __be32)(__u32)__builtin_bswap32((__u32)(((__u32)__builtin_bswap32((__u32)(( __u32)(__be32)(*var))) + val)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void be64_add_cpu(__be64 *var, u64 val) +{ + *var = (( __be64)(__u64)__builtin_bswap64((__u64)(((__u64)__builtin_bswap64((__u64)(( __u64)(__be64)(*var))) + val)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = (( __be32)(__u32)__builtin_bswap32((__u32)((src[i])))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = (__u32)__builtin_bswap32((__u32)(( __u32)(__be32)(src[i]))); +} +# 12 "./include/linux/byteorder/little_endian.h" 2 +# 6 "./arch/x86/include/uapi/asm/byteorder.h" 2 +# 7 "./include/asm-generic/bitops/le.h" 2 + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long find_next_zero_bit_le(const void *addr, + unsigned long size, unsigned long offset) +{ + return find_next_zero_bit(addr, size, offset); +} + 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long find_next_bit_le(const void *addr, + unsigned long size, unsigned long offset) +{ + return find_next_bit(addr, size, offset); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long find_first_zero_bit_le(const void *addr, + unsigned long size) +{ + return find_first_zero_bit(addr, size); +} +# 53 "./include/asm-generic/bitops/le.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_bit_le(int nr, const void *addr) +{ + return test_bit(nr ^ 0, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_bit_le(int nr, void *addr) +{ + set_bit(nr ^ 0, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_bit_le(int nr, void *addr) +{ + clear_bit(nr ^ 0, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __set_bit_le(int nr, void *addr) +{ + __set_bit(nr ^ 0, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __clear_bit_le(int nr, void *addr) +{ + __clear_bit(nr ^ 0, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_set_bit_le(int nr, void *addr) +{ + return test_and_set_bit(nr ^ 0, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_clear_bit_le(int nr, void *addr) +{ + return test_and_clear_bit(nr ^ 0, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __test_and_set_bit_le(int nr, void *addr) +{ + return __test_and_set_bit(nr ^ 0, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __test_and_clear_bit_le(int nr, void *addr) +{ + return __test_and_clear_bit(nr ^ 0, addr); +} +# 400 "./arch/x86/include/asm/bitops.h" 2 + +# 1 "./include/asm-generic/bitops/ext2-atomic-setbit.h" 1 +# 402 "./arch/x86/include/asm/bitops.h" 2 +# 30 "./include/linux/bitops.h" 2 +# 65 "./include/linux/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return order; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long hweight_long(unsigned long w) +{ + return sizeof(w) == 4 ? (__builtin_constant_p(w) ? 
((((unsigned int) ((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))))) + ((unsigned int) ((!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))))))) : __arch_hweight32(w)) : (__builtin_constant_p((__u64)w) ? (((((unsigned int) ((!!(((__u64)w) & (1ULL << 0))) + (!!(((__u64)w) & (1ULL << 1))) + (!!(((__u64)w) & (1ULL << 2))) + (!!(((__u64)w) & (1ULL << 3))) + (!!(((__u64)w) & (1ULL << 4))) + (!!(((__u64)w) & (1ULL << 5))) + (!!(((__u64)w) & (1ULL << 6))) + (!!(((__u64)w) & (1ULL << 7))))) + ((unsigned int) ((!!((((__u64)w) >> 8) & (1ULL << 0))) + (!!((((__u64)w) >> 8) & (1ULL << 1))) + (!!((((__u64)w) >> 8) & (1ULL << 2))) + (!!((((__u64)w) >> 8) & (1ULL << 3))) + (!!((((__u64)w) >> 8) & (1ULL << 4))) + (!!((((__u64)w) >> 8) & (1ULL << 5))) + (!!((((__u64)w) >> 8) & (1ULL << 6))) + (!!((((__u64)w) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!((((__u64)w) >> 16) & (1ULL << 0))) + (!!((((__u64)w) >> 16) & (1ULL << 1))) + (!!((((__u64)w) >> 16) & (1ULL << 2))) + (!!((((__u64)w) >> 16) & (1ULL << 3))) + (!!((((__u64)w) >> 16) & (1ULL << 4))) + (!!((((__u64)w) >> 16) & (1ULL << 5))) + (!!((((__u64)w) >> 16) & (1ULL << 6))) + (!!((((__u64)w) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!(((((__u64)w) >> 16) >> 8) & (1ULL << 0))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 1))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 2))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 3))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 4))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 5))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 6))) + (!!(((((__u64)w) >> 16) >> 8) & (1ULL << 7))))))) + ((((unsigned int) ((!!((((__u64)w) >> 32) & (1ULL << 0))) + (!!((((__u64)w) >> 32) & (1ULL << 1))) + (!!((((__u64)w) >> 32) & (1ULL << 2))) + (!!((((__u64)w) >> 32) & (1ULL << 3))) + (!!((((__u64)w) >> 32) & (1ULL << 4))) + (!!((((__u64)w) >> 32) & (1ULL << 5))) + (!!((((__u64)w) >> 32) & (1ULL << 6))) + (!!((((__u64)w) >> 32) & (1ULL << 7))))) + ((unsigned int) ((!!(((((__u64)w) >> 32) >> 8) & (1ULL << 0))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 1))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 2))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 3))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 4))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 5))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 6))) + (!!(((((__u64)w) >> 32) >> 8) & (1ULL << 7)))))) + (((unsigned int) ((!!(((((__u64)w) >> 32) >> 16) & (1ULL << 0))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 1))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 2))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 3))) 
+ (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 4))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 5))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 6))) + (!!(((((__u64)w) >> 32) >> 16) & (1ULL << 7))))) + ((unsigned int) ((!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 0))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 1))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 2))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 3))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 4))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 5))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 6))) + (!!((((((__u64)w) >> 32) >> 16) >> 8) & (1ULL << 7)))))))) : __arch_hweight64((__u64)w)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u64 rol64(__u64 word, unsigned int shift) +{ + return (word << (shift & 63)) | (word >> ((-shift) & 63)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u64 ror64(__u64 word, unsigned int shift) +{ + return (word >> (shift & 63)) | (word << ((-shift) & 63)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 rol32(__u32 word, unsigned int shift) +{ + return (word << (shift & 31)) | (word >> ((-shift) & 31)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 ror32(__u32 word, unsigned int shift) +{ + return (word >> (shift & 31)) | (word << ((-shift) & 31)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u16 rol16(__u16 word, unsigned int shift) +{ + return (word << (shift & 15)) | (word >> ((-shift) & 15)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u16 ror16(__u16 word, unsigned int shift) +{ + return (word >> (shift & 15)) | (word << ((-shift) & 15)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u8 rol8(__u8 word, unsigned int shift) +{ + return (word << (shift & 7)) | (word >> ((-shift) & 7)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u8 ror8(__u8 word, unsigned int shift) +{ + return (word >> (shift & 7)) | (word << ((-shift) & 7)); +} +# 165 "./include/linux/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __s32 sign_extend32(__u32 value, int index) +{ + __u8 shift = 31 - index; + return (__s32)(value << shift) >> shift; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __s64 sign_extend64(__u64 value, int index) +{ + __u8 shift = 63 - index; + return (__s64)(value << shift) >> shift; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned fls_long(unsigned long l) +{ + if (sizeof(l) == 4) + return fls(l); + return fls64(l); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_count_order(unsigned int count) 
+{ + int order; + + order = fls(count) - 1; + if (count & (count - 1)) + order++; + return order; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_count_order_long(unsigned long l) +{ + if (l == 0UL) + return -1; + else if (l & (l - 1UL)) + return (int)fls_long(l); + else + return (int)fls_long(l) - 1; +} +# 223 "./include/linux/bitops.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __ffs64(u64 word) +{ + + + + + + + return __ffs((unsigned long)word); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void assign_bit(long nr, volatile unsigned long *addr, + bool value) +{ + if (value) + set_bit(nr, addr); + else + clear_bit(nr, addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __assign_bit(long nr, volatile unsigned long *addr, + bool value) +{ + if (value) + __set_bit(nr, addr); + else + __clear_bit(nr, addr); +} +# 299 "./include/linux/bitops.h" +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +# 13 "./include/linux/kernel.h" 2 +# 1 "./include/linux/log2.h" 1 +# 21 "./include/linux/log2.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const)) +int __ilog2_u32(u32 n) +{ + return fls(n) - 1; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const)) +int __ilog2_u64(u64 n) +{ + return fls64(n) - 1; +} +# 44 "./include/linux/log2.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const)) +bool is_power_of_2(unsigned long n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const)) +unsigned long __roundup_pow_of_two(unsigned long n) +{ + return 1UL << fls_long(n - 1); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const)) +unsigned long __rounddown_pow_of_two(unsigned long n) +{ + return 1UL << (fls_long(n) - 1); +} +# 197 "./include/linux/log2.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) +int __order_base_2(unsigned long n) +{ + return n > 1 ? ( __builtin_constant_p(n - 1) ? ( __builtin_constant_p(n - 1) ? ( (n - 1) < 2 ? 0 : (n - 1) & (1ULL << 63) ? 63 : (n - 1) & (1ULL << 62) ? 62 : (n - 1) & (1ULL << 61) ? 61 : (n - 1) & (1ULL << 60) ? 60 : (n - 1) & (1ULL << 59) ? 59 : (n - 1) & (1ULL << 58) ? 58 : (n - 1) & (1ULL << 57) ? 57 : (n - 1) & (1ULL << 56) ? 56 : (n - 1) & (1ULL << 55) ? 55 : (n - 1) & (1ULL << 54) ? 54 : (n - 1) & (1ULL << 53) ? 53 : (n - 1) & (1ULL << 52) ? 52 : (n - 1) & (1ULL << 51) ? 51 : (n - 1) & (1ULL << 50) ? 50 : (n - 1) & (1ULL << 49) ? 49 : (n - 1) & (1ULL << 48) ? 48 : (n - 1) & (1ULL << 47) ? 47 : (n - 1) & (1ULL << 46) ? 46 : (n - 1) & (1ULL << 45) ? 45 : (n - 1) & (1ULL << 44) ? 44 : (n - 1) & (1ULL << 43) ? 43 : (n - 1) & (1ULL << 42) ? 
42 : (n - 1) & (1ULL << 41) ? 41 : (n - 1) & (1ULL << 40) ? 40 : (n - 1) & (1ULL << 39) ? 39 : (n - 1) & (1ULL << 38) ? 38 : (n - 1) & (1ULL << 37) ? 37 : (n - 1) & (1ULL << 36) ? 36 : (n - 1) & (1ULL << 35) ? 35 : (n - 1) & (1ULL << 34) ? 34 : (n - 1) & (1ULL << 33) ? 33 : (n - 1) & (1ULL << 32) ? 32 : (n - 1) & (1ULL << 31) ? 31 : (n - 1) & (1ULL << 30) ? 30 : (n - 1) & (1ULL << 29) ? 29 : (n - 1) & (1ULL << 28) ? 28 : (n - 1) & (1ULL << 27) ? 27 : (n - 1) & (1ULL << 26) ? 26 : (n - 1) & (1ULL << 25) ? 25 : (n - 1) & (1ULL << 24) ? 24 : (n - 1) & (1ULL << 23) ? 23 : (n - 1) & (1ULL << 22) ? 22 : (n - 1) & (1ULL << 21) ? 21 : (n - 1) & (1ULL << 20) ? 20 : (n - 1) & (1ULL << 19) ? 19 : (n - 1) & (1ULL << 18) ? 18 : (n - 1) & (1ULL << 17) ? 17 : (n - 1) & (1ULL << 16) ? 16 : (n - 1) & (1ULL << 15) ? 15 : (n - 1) & (1ULL << 14) ? 14 : (n - 1) & (1ULL << 13) ? 13 : (n - 1) & (1ULL << 12) ? 12 : (n - 1) & (1ULL << 11) ? 11 : (n - 1) & (1ULL << 10) ? 10 : (n - 1) & (1ULL << 9) ? 9 : (n - 1) & (1ULL << 8) ? 8 : (n - 1) & (1ULL << 7) ? 7 : (n - 1) & (1ULL << 6) ? 6 : (n - 1) & (1ULL << 5) ? 5 : (n - 1) & (1ULL << 4) ? 4 : (n - 1) & (1ULL << 3) ? 3 : (n - 1) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(n - 1) <= 4) ? __ilog2_u32(n - 1) : __ilog2_u64(n - 1) ) + 1 : 0; +} +# 224 "./include/linux/log2.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((const)) +int __bits_per(unsigned long n) +{ + if (n < 2) + return 1; + if (is_power_of_2(n)) + return ( __builtin_constant_p(n) ? ( ((n) == 0 || (n) == 1) ? 0 : ( __builtin_constant_p((n) - 1) ? ( __builtin_constant_p((n) - 1) ? ( ((n) - 1) < 2 ? 0 : ((n) - 1) & (1ULL << 63) ? 63 : ((n) - 1) & (1ULL << 62) ? 62 : ((n) - 1) & (1ULL << 61) ? 61 : ((n) - 1) & (1ULL << 60) ? 60 : ((n) - 1) & (1ULL << 59) ? 59 : ((n) - 1) & (1ULL << 58) ? 58 : ((n) - 1) & (1ULL << 57) ? 57 : ((n) - 1) & (1ULL << 56) ? 56 : ((n) - 1) & (1ULL << 55) ? 55 : ((n) - 1) & (1ULL << 54) ? 54 : ((n) - 1) & (1ULL << 53) ? 53 : ((n) - 1) & (1ULL << 52) ? 52 : ((n) - 1) & (1ULL << 51) ? 51 : ((n) - 1) & (1ULL << 50) ? 50 : ((n) - 1) & (1ULL << 49) ? 49 : ((n) - 1) & (1ULL << 48) ? 48 : ((n) - 1) & (1ULL << 47) ? 47 : ((n) - 1) & (1ULL << 46) ? 46 : ((n) - 1) & (1ULL << 45) ? 45 : ((n) - 1) & (1ULL << 44) ? 44 : ((n) - 1) & (1ULL << 43) ? 43 : ((n) - 1) & (1ULL << 42) ? 42 : ((n) - 1) & (1ULL << 41) ? 41 : ((n) - 1) & (1ULL << 40) ? 40 : ((n) - 1) & (1ULL << 39) ? 39 : ((n) - 1) & (1ULL << 38) ? 38 : ((n) - 1) & (1ULL << 37) ? 37 : ((n) - 1) & (1ULL << 36) ? 36 : ((n) - 1) & (1ULL << 35) ? 35 : ((n) - 1) & (1ULL << 34) ? 34 : ((n) - 1) & (1ULL << 33) ? 33 : ((n) - 1) & (1ULL << 32) ? 32 : ((n) - 1) & (1ULL << 31) ? 31 : ((n) - 1) & (1ULL << 30) ? 30 : ((n) - 1) & (1ULL << 29) ? 29 : ((n) - 1) & (1ULL << 28) ? 28 : ((n) - 1) & (1ULL << 27) ? 27 : ((n) - 1) & (1ULL << 26) ? 26 : ((n) - 1) & (1ULL << 25) ? 25 : ((n) - 1) & (1ULL << 24) ? 24 : ((n) - 1) & (1ULL << 23) ? 23 : ((n) - 1) & (1ULL << 22) ? 22 : ((n) - 1) & (1ULL << 21) ? 21 : ((n) - 1) & (1ULL << 20) ? 20 : ((n) - 1) & (1ULL << 19) ? 19 : ((n) - 1) & (1ULL << 18) ? 18 : ((n) - 1) & (1ULL << 17) ? 17 : ((n) - 1) & (1ULL << 16) ? 16 : ((n) - 1) & (1ULL << 15) ? 15 : ((n) - 1) & (1ULL << 14) ? 14 : ((n) - 1) & (1ULL << 13) ? 13 : ((n) - 1) & (1ULL << 12) ? 12 : ((n) - 1) & (1ULL << 11) ? 11 : ((n) - 1) & (1ULL << 10) ? 10 : ((n) - 1) & (1ULL << 9) ? 9 : ((n) - 1) & (1ULL << 8) ? 8 : ((n) - 1) & (1ULL << 7) ? 7 : ((n) - 1) & (1ULL << 6) ? 
6 : ((n) - 1) & (1ULL << 5) ? 5 : ((n) - 1) & (1ULL << 4) ? 4 : ((n) - 1) & (1ULL << 3) ? 3 : ((n) - 1) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof((n) - 1) <= 4) ? __ilog2_u32((n) - 1) : __ilog2_u64((n) - 1) ) + 1) : __order_base_2(n) ) + 1; + return ( __builtin_constant_p(n) ? ( ((n) == 0 || (n) == 1) ? 0 : ( __builtin_constant_p((n) - 1) ? ( __builtin_constant_p((n) - 1) ? ( ((n) - 1) < 2 ? 0 : ((n) - 1) & (1ULL << 63) ? 63 : ((n) - 1) & (1ULL << 62) ? 62 : ((n) - 1) & (1ULL << 61) ? 61 : ((n) - 1) & (1ULL << 60) ? 60 : ((n) - 1) & (1ULL << 59) ? 59 : ((n) - 1) & (1ULL << 58) ? 58 : ((n) - 1) & (1ULL << 57) ? 57 : ((n) - 1) & (1ULL << 56) ? 56 : ((n) - 1) & (1ULL << 55) ? 55 : ((n) - 1) & (1ULL << 54) ? 54 : ((n) - 1) & (1ULL << 53) ? 53 : ((n) - 1) & (1ULL << 52) ? 52 : ((n) - 1) & (1ULL << 51) ? 51 : ((n) - 1) & (1ULL << 50) ? 50 : ((n) - 1) & (1ULL << 49) ? 49 : ((n) - 1) & (1ULL << 48) ? 48 : ((n) - 1) & (1ULL << 47) ? 47 : ((n) - 1) & (1ULL << 46) ? 46 : ((n) - 1) & (1ULL << 45) ? 45 : ((n) - 1) & (1ULL << 44) ? 44 : ((n) - 1) & (1ULL << 43) ? 43 : ((n) - 1) & (1ULL << 42) ? 42 : ((n) - 1) & (1ULL << 41) ? 41 : ((n) - 1) & (1ULL << 40) ? 40 : ((n) - 1) & (1ULL << 39) ? 39 : ((n) - 1) & (1ULL << 38) ? 38 : ((n) - 1) & (1ULL << 37) ? 37 : ((n) - 1) & (1ULL << 36) ? 36 : ((n) - 1) & (1ULL << 35) ? 35 : ((n) - 1) & (1ULL << 34) ? 34 : ((n) - 1) & (1ULL << 33) ? 33 : ((n) - 1) & (1ULL << 32) ? 32 : ((n) - 1) & (1ULL << 31) ? 31 : ((n) - 1) & (1ULL << 30) ? 30 : ((n) - 1) & (1ULL << 29) ? 29 : ((n) - 1) & (1ULL << 28) ? 28 : ((n) - 1) & (1ULL << 27) ? 27 : ((n) - 1) & (1ULL << 26) ? 26 : ((n) - 1) & (1ULL << 25) ? 25 : ((n) - 1) & (1ULL << 24) ? 24 : ((n) - 1) & (1ULL << 23) ? 23 : ((n) - 1) & (1ULL << 22) ? 22 : ((n) - 1) & (1ULL << 21) ? 21 : ((n) - 1) & (1ULL << 20) ? 20 : ((n) - 1) & (1ULL << 19) ? 19 : ((n) - 1) & (1ULL << 18) ? 18 : ((n) - 1) & (1ULL << 17) ? 17 : ((n) - 1) & (1ULL << 16) ? 16 : ((n) - 1) & (1ULL << 15) ? 15 : ((n) - 1) & (1ULL << 14) ? 14 : ((n) - 1) & (1ULL << 13) ? 13 : ((n) - 1) & (1ULL << 12) ? 12 : ((n) - 1) & (1ULL << 11) ? 11 : ((n) - 1) & (1ULL << 10) ? 10 : ((n) - 1) & (1ULL << 9) ? 9 : ((n) - 1) & (1ULL << 8) ? 8 : ((n) - 1) & (1ULL << 7) ? 7 : ((n) - 1) & (1ULL << 6) ? 6 : ((n) - 1) & (1ULL << 5) ? 5 : ((n) - 1) & (1ULL << 4) ? 4 : ((n) - 1) & (1ULL << 3) ? 3 : ((n) - 1) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof((n) - 1) <= 4) ? 
__ilog2_u32((n) - 1) : __ilog2_u64((n) - 1) ) + 1) : __order_base_2(n) ); +} +# 14 "./include/linux/kernel.h" 2 +# 1 "./include/linux/typecheck.h" 1 +# 15 "./include/linux/kernel.h" 2 +# 1 "./include/linux/printk.h" 1 + + + + + +# 1 "./include/linux/init.h" 1 +# 116 "./include/linux/init.h" +typedef int (*initcall_t)(void); +typedef void (*exitcall_t)(void); + + +typedef int initcall_entry_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) initcall_t initcall_from_entry(initcall_entry_t *entry) +{ + return offset_to_ptr(entry); +} +# 135 "./include/linux/init.h" +extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; + + +typedef void (*ctor_fn_t)(void); + +struct file_system_type; + + +extern int do_one_initcall(initcall_t fn); +extern char __attribute__((__section__(".init.data"))) boot_command_line[]; +extern char *saved_command_line; +extern unsigned int reset_devices; + + +void setup_arch(char **); +void prepare_namespace(void); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) init_rootfs(void); +extern struct file_system_type rootfs_fs_type; + + +extern bool rodata_enabled; + + +void mark_rodata_ro(void); + + +extern void (*late_time_init)(void); + +extern bool initcall_debug; +# 241 "./include/linux/init.h" +struct obs_kernel_param { + const char *str; + int (*setup_func)(char *); + int early; +}; +# 290 "./include/linux/init.h" +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) parse_early_param(void); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) parse_early_options(char *cmdline); +# 7 "./include/linux/printk.h" 2 +# 1 "./include/linux/kern_levels.h" 1 +# 8 "./include/linux/printk.h" 2 + +# 1 "./include/linux/cache.h" 1 + + + + +# 1 "./include/uapi/linux/kernel.h" 1 + + + + +# 1 "./include/uapi/linux/sysinfo.h" 1 + + + + + + + +struct sysinfo { + __kernel_long_t uptime; + __kernel_ulong_t loads[3]; + __kernel_ulong_t totalram; + __kernel_ulong_t freeram; + __kernel_ulong_t sharedram; + __kernel_ulong_t bufferram; + __kernel_ulong_t totalswap; + __kernel_ulong_t freeswap; + __u16 procs; + __u16 pad; + __kernel_ulong_t totalhigh; + __kernel_ulong_t freehigh; + __u32 mem_unit; + char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)]; +}; +# 6 "./include/uapi/linux/kernel.h" 2 +# 6 "./include/linux/cache.h" 2 +# 1 "./arch/x86/include/asm/cache.h" 1 +# 7 "./include/linux/cache.h" 2 +# 10 "./include/linux/printk.h" 2 + +extern const char linux_banner[]; +extern const char linux_proc_banner[]; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int printk_get_level(const char *buffer) +{ + if (buffer[0] == '\001' && buffer[1]) { + switch (buffer[1]) { + case '0' ... 
'7': + case 'c': + return buffer[1]; + } + } + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *printk_skip_level(const char *buffer) +{ + if (printk_get_level(buffer)) + return buffer + 2; + + return buffer; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *printk_skip_headers(const char *buffer) +{ + while (printk_get_level(buffer)) + buffer = printk_skip_level(buffer); + + return buffer; +} +# 62 "./include/linux/printk.h" +extern int console_printk[]; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void console_silent(void) +{ + (console_printk[0]) = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void console_verbose(void) +{ + if ((console_printk[0])) + (console_printk[0]) = 15; +} + + + +extern char devkmsg_log_str[]; +struct ctl_table; + +extern int suppress_printk; + +struct va_format { + const char *fmt; + va_list *va; +}; +# 142 "./include/linux/printk.h" +extern __attribute__((__format__(printf, 1, 2))) +void early_printk(const char *fmt, ...); + + + + + + +extern void printk_nmi_enter(void); +extern void printk_nmi_exit(void); +extern void printk_nmi_direct_enter(void); +extern void printk_nmi_direct_exit(void); +# 162 "./include/linux/printk.h" + __attribute__((__format__(printf, 5, 0))) +int vprintk_emit(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, va_list args); + + __attribute__((__format__(printf, 1, 0))) +int vprintk(const char *fmt, va_list args); + + __attribute__((__format__(printf, 1, 2))) __attribute__((__cold__)) +int printk(const char *fmt, ...); + + + + +__attribute__((__format__(printf, 1, 2))) __attribute__((__cold__)) int printk_deferred(const char *fmt, ...); + + + + + + +extern int __printk_ratelimit(const char *func); + +extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec); + +extern int printk_delay_msec; +extern int dmesg_restrict; + +extern int +devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf, + size_t *lenp, loff_t *ppos); + +extern void wake_up_klogd(void); + +char *log_buf_addr_get(void); +u32 log_buf_len_get(void); +void log_buf_vmcoreinfo_setup(void); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) setup_log_buf(int early); +__attribute__((__format__(printf, 1, 2))) void dump_stack_set_arch_desc(const char *fmt, ...); +void dump_stack_print_info(const char *log_lvl); +void show_regs_print_info(const char *log_lvl); +extern void dump_stack(void) __attribute__((__cold__)); +extern void printk_safe_flush(void); +extern void printk_safe_flush_on_panic(void); +# 280 "./include/linux/printk.h" +extern int kptr_restrict; +# 404 "./include/linux/printk.h" +# 1 "./include/linux/dynamic_debug.h" 1 + + + + + +# 1 "./include/linux/jump_label.h" 1 +# 79 "./include/linux/jump_label.h" +extern bool static_key_initialized; + + + + + + + +struct static_key { + atomic_t enabled; +# 102 "./include/linux/jump_label.h" + union { + unsigned long type; + struct jump_entry *entries; + struct static_key_mod *next; + }; +}; +# 117 "./include/linux/jump_label.h" +# 1 "./arch/x86/include/asm/jump_label.h" 1 +# 23 "./arch/x86/include/asm/jump_label.h" +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_static_branch(struct static_key *key, bool branch) +{ + do { asm goto("1:" ".byte " "0x0f,0x1f,0x44,0x00,0" "\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (key), "i" (branch) : : l_yes); asm (""); } while (0) + + + + + + + ; + + return false; +l_yes: + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + do { asm goto("1:" ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t" "2:\n\t" ".pushsection __jump_table, \"aw\" \n\t" " " ".balign 8" " " "\n\t" ".long 1b - ., %l[l_yes] - . \n\t" " " ".quad" " " "%c0 + %c1 - .\n\t" ".popsection \n\t" : : "i" (key), "i" (branch) : : l_yes); asm (""); } while (0) + + + + + + + + ; + + return false; +l_yes: + return true; +} +# 118 "./include/linux/jump_label.h" 2 + + + + +struct jump_entry { + s32 code; + s32 target; + long key; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long jump_entry_code(const struct jump_entry *entry) +{ + return (unsigned long)&entry->code + entry->code; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long jump_entry_target(const struct jump_entry *entry) +{ + return (unsigned long)&entry->target + entry->target; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct static_key *jump_entry_key(const struct jump_entry *entry) +{ + long offset = entry->key & ~3L; + + return (struct static_key *)((unsigned long)&entry->key + offset); +} +# 164 "./include/linux/jump_label.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool jump_entry_is_branch(const struct jump_entry *entry) +{ + return (unsigned long)entry->key & 1UL; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool jump_entry_is_init(const struct jump_entry *entry) +{ + return (unsigned long)entry->key & 2UL; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void jump_entry_set_init(struct jump_entry *entry) +{ + entry->key |= 2; +} + + + + + + +enum jump_label_type { + JUMP_LABEL_NOP = 0, + JUMP_LABEL_JMP, +}; + +struct module; +# 198 "./include/linux/jump_label.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool static_key_false(struct static_key *key) +{ + return arch_static_branch(key, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool static_key_true(struct static_key *key) +{ + return !arch_static_branch(key, true); +} + +extern struct jump_entry __start___jump_table[]; +extern struct jump_entry __stop___jump_table[]; + +extern void jump_label_init(void); +extern void jump_label_lock(void); +extern void jump_label_unlock(void); +extern void arch_jump_label_transform(struct 
jump_entry *entry, + enum jump_label_type type); +extern void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type); +extern bool arch_jump_label_transform_queue(struct jump_entry *entry, + enum jump_label_type type); +extern void arch_jump_label_transform_apply(void); +extern int jump_label_text_reserved(void *start, void *end); +extern void static_key_slow_inc(struct static_key *key); +extern void static_key_slow_dec(struct static_key *key); +extern void static_key_slow_inc_cpuslocked(struct static_key *key); +extern void static_key_slow_dec_cpuslocked(struct static_key *key); +extern void jump_label_apply_nops(struct module *mod); +extern int static_key_count(struct static_key *key); +extern void static_key_enable(struct static_key *key); +extern void static_key_disable(struct static_key *key); +extern void static_key_enable_cpuslocked(struct static_key *key); +extern void static_key_disable_cpuslocked(struct static_key *key); +# 346 "./include/linux/jump_label.h" +struct static_key_true { + struct static_key key; +}; + +struct static_key_false { + struct static_key key; +}; +# 385 "./include/linux/jump_label.h" +extern bool ____wrong_branch_error(void); +# 7 "./include/linux/dynamic_debug.h" 2 + + + + + + + +struct _ddebug { + + + + + const char *modname; + const char *function; + const char *filename; + const char *format; + unsigned int lineno:18; +# 40 "./include/linux/dynamic_debug.h" + unsigned int flags:8; + + union { + struct static_key_true dd_key_true; + struct static_key_false dd_key_false; + } key; + +} __attribute__((aligned(8))); + + + + +int ddebug_add_module(struct _ddebug *tab, unsigned int n, + const char *modname); +extern int ddebug_remove_module(const char *mod_name); +extern __attribute__((__format__(printf, 2, 3))) +void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); + +extern int ddebug_dyndbg_module_param_cb(char *param, char *val, + const char *modname); + +struct device; + +extern __attribute__((__format__(printf, 3, 4))) +void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, + const char *fmt, ...); + +struct net_device; + +extern __attribute__((__format__(printf, 3, 4))) +void __dynamic_netdev_dbg(struct _ddebug *descriptor, + const struct net_device *dev, + const char *fmt, ...); + +struct ib_device; + +extern __attribute__((__format__(printf, 3, 4))) +void __dynamic_ibdev_dbg(struct _ddebug *descriptor, + const struct ib_device *ibdev, + const char *fmt, ...); +# 405 "./include/linux/printk.h" 2 +# 560 "./include/linux/printk.h" +extern const struct file_operations kmsg_fops; + +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, char *linebuf, size_t linebuflen, + bool ascii); + +extern void print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +# 16 "./include/linux/kernel.h" 2 + + +# 1 "./arch/x86/include/asm/div64.h" 1 +# 75 "./arch/x86/include/asm/div64.h" +# 1 "./include/asm-generic/div64.h" 1 +# 76 "./arch/x86/include/asm/div64.h" 2 + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 mul_u64_u32_div(u64 a, u32 mul, u32 div) +{ + u64 q; + + asm ("mulq %2; divq %3" : "=a" (q) + : "a" (a), "rm" ((u64)mul), "rm" ((u64)div) + : "rdx"); + + return q; +} +# 19 "./include/linux/kernel.h" 2 +# 191 
"./include/linux/kernel.h" +struct completion; +struct pt_regs; +struct user; +# 203 "./include/linux/kernel.h" +extern void ___might_sleep(const char *file, int line, int preempt_offset); +extern void __might_sleep(const char *file, int line, int preempt_offset); +extern void __cant_sleep(const char *file, int line, int preempt_offset); +# 304 "./include/linux/kernel.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 reciprocal_scale(u32 val, u32 ep_ro) +{ + return (u32)(((u64) val * ep_ro) >> 32); +} + + + + +void __might_fault(const char *file, int line); + + + + +extern struct atomic_notifier_head panic_notifier_list; +extern long (*panic_blink)(int state); +__attribute__((__format__(printf, 1, 2))) +void panic(const char *fmt, ...) __attribute__((__noreturn__)) __attribute__((__cold__)); +void nmi_panic(struct pt_regs *regs, const char *msg); +extern void oops_enter(void); +extern void oops_exit(void); +void print_oops_end_marker(void); +extern int oops_may_print(void); +void do_exit(long error_code) __attribute__((__noreturn__)); +void complete_and_exit(struct completion *, long) __attribute__((__noreturn__)); + + +int __attribute__((__warn_unused_result__)) _kstrtoul(const char *s, unsigned int base, unsigned long *res); +int __attribute__((__warn_unused_result__)) _kstrtol(const char *s, unsigned int base, long *res); + +int __attribute__((__warn_unused_result__)) kstrtoull(const char *s, unsigned int base, unsigned long long *res); +int __attribute__((__warn_unused_result__)) kstrtoll(const char *s, unsigned int base, long long *res); +# 351 "./include/linux/kernel.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtoul(const char *s, unsigned int base, unsigned long *res) +{ + + + + + if (sizeof(unsigned long) == sizeof(unsigned long long) && + __alignof__(unsigned long) == __alignof__(unsigned long long)) + return kstrtoull(s, base, (unsigned long long *)res); + else + return _kstrtoul(s, base, res); +} +# 379 "./include/linux/kernel.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtol(const char *s, unsigned int base, long *res) +{ + + + + + if (sizeof(long) == sizeof(long long) && + __alignof__(long) == __alignof__(long long)) + return kstrtoll(s, base, (long long *)res); + else + return _kstrtol(s, base, res); +} + +int __attribute__((__warn_unused_result__)) kstrtouint(const char *s, unsigned int base, unsigned int *res); +int __attribute__((__warn_unused_result__)) kstrtoint(const char *s, unsigned int base, int *res); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtou64(const char *s, unsigned int base, u64 *res) +{ + return kstrtoull(s, base, res); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtos64(const char *s, unsigned int base, s64 *res) +{ + return kstrtoll(s, base, res); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtou32(const char *s, unsigned int base, u32 *res) +{ + return kstrtouint(s, base, res); +} + 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtos32(const char *s, unsigned int base, s32 *res) +{ + return kstrtoint(s, base, res); +} + +int __attribute__((__warn_unused_result__)) kstrtou16(const char *s, unsigned int base, u16 *res); +int __attribute__((__warn_unused_result__)) kstrtos16(const char *s, unsigned int base, s16 *res); +int __attribute__((__warn_unused_result__)) kstrtou8(const char *s, unsigned int base, u8 *res); +int __attribute__((__warn_unused_result__)) kstrtos8(const char *s, unsigned int base, s8 *res); +int __attribute__((__warn_unused_result__)) kstrtobool(const char *s, bool *res); + +int __attribute__((__warn_unused_result__)) kstrtoull_from_user(const char *s, size_t count, unsigned int base, unsigned long long *res); +int __attribute__((__warn_unused_result__)) kstrtoll_from_user(const char *s, size_t count, unsigned int base, long long *res); +int __attribute__((__warn_unused_result__)) kstrtoul_from_user(const char *s, size_t count, unsigned int base, unsigned long *res); +int __attribute__((__warn_unused_result__)) kstrtol_from_user(const char *s, size_t count, unsigned int base, long *res); +int __attribute__((__warn_unused_result__)) kstrtouint_from_user(const char *s, size_t count, unsigned int base, unsigned int *res); +int __attribute__((__warn_unused_result__)) kstrtoint_from_user(const char *s, size_t count, unsigned int base, int *res); +int __attribute__((__warn_unused_result__)) kstrtou16_from_user(const char *s, size_t count, unsigned int base, u16 *res); +int __attribute__((__warn_unused_result__)) kstrtos16_from_user(const char *s, size_t count, unsigned int base, s16 *res); +int __attribute__((__warn_unused_result__)) kstrtou8_from_user(const char *s, size_t count, unsigned int base, u8 *res); +int __attribute__((__warn_unused_result__)) kstrtos8_from_user(const char *s, size_t count, unsigned int base, s8 *res); +int __attribute__((__warn_unused_result__)) kstrtobool_from_user(const char *s, size_t count, bool *res); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtou64_from_user(const char *s, size_t count, unsigned int base, u64 *res) +{ + return kstrtoull_from_user(s, count, base, res); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtos64_from_user(const char *s, size_t count, unsigned int base, s64 *res) +{ + return kstrtoll_from_user(s, count, base, res); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtou32_from_user(const char *s, size_t count, unsigned int base, u32 *res) +{ + return kstrtouint_from_user(s, count, base, res); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kstrtos32_from_user(const char *s, size_t count, unsigned int base, s32 *res) +{ + return kstrtoint_from_user(s, count, base, res); +} +# 466 "./include/linux/kernel.h" +extern unsigned long simple_strtoul(const char *,char **,unsigned int); +extern long simple_strtol(const char *,char **,unsigned int); +extern unsigned long long simple_strtoull(const char *,char **,unsigned int); 
+extern long long simple_strtoll(const char *,char **,unsigned int); + +extern int num_to_str(char *buf, int size, + unsigned long long num, unsigned int width); + + + +extern __attribute__((__format__(printf, 2, 3))) int sprintf(char *buf, const char * fmt, ...); +extern __attribute__((__format__(printf, 2, 0))) int vsprintf(char *buf, const char *, va_list); +extern __attribute__((__format__(printf, 3, 4))) +int snprintf(char *buf, size_t size, const char *fmt, ...); +extern __attribute__((__format__(printf, 3, 0))) +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +extern __attribute__((__format__(printf, 3, 4))) +int scnprintf(char *buf, size_t size, const char *fmt, ...); +extern __attribute__((__format__(printf, 3, 0))) +int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); +extern __attribute__((__format__(printf, 2, 3))) __attribute__((__malloc__)) +char *kasprintf(gfp_t gfp, const char *fmt, ...); +extern __attribute__((__format__(printf, 2, 0))) __attribute__((__malloc__)) +char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); +extern __attribute__((__format__(printf, 2, 0))) +const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); + +extern __attribute__((__format__(scanf, 2, 3))) +int sscanf(const char *, const char *, ...); +extern __attribute__((__format__(scanf, 2, 0))) +int vsscanf(const char *, const char *, va_list); + +extern int get_option(char **str, int *pint); +extern char *get_options(const char *str, int nints, int *ints); +extern unsigned long long memparse(const char *ptr, char **retptr); +extern bool parse_option_str(const char *str, const char *option); +extern char *next_arg(char *args, char **param, char **val); + +extern int core_kernel_text(unsigned long addr); +extern int init_kernel_text(unsigned long addr); +extern int core_kernel_data(unsigned long addr); +extern int __kernel_text_address(unsigned long addr); +extern int kernel_text_address(unsigned long addr); +extern int func_ptr_is_kernel_text(void *ptr); + +u64 int_pow(u64 base, unsigned int exp); +unsigned long int_sqrt(unsigned long); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 int_sqrt64(u64 x) +{ + return (u32)int_sqrt(x); +} + + + +extern unsigned int sysctl_oops_all_cpu_backtrace; + + + + +extern void bust_spinlocks(int yes); +extern int oops_in_progress; +extern int panic_timeout; +extern unsigned long panic_print; +extern int panic_on_oops; +extern int panic_on_unrecovered_nmi; +extern int panic_on_io_nmi; +extern int panic_on_warn; +extern unsigned long panic_on_taint; +extern bool panic_on_taint_nousertaint; +extern int sysctl_panic_on_rcu_stall; +extern int sysctl_panic_on_stackoverflow; + +extern bool crash_kexec_post_notifiers; + + + + + + +extern atomic_t panic_cpu; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_arch_panic_timeout(int timeout, int arch_default_timeout) +{ + if (panic_timeout == arch_default_timeout) + panic_timeout = timeout; +} +extern const char *print_tainted(void); +enum lockdep_ok { + LOCKDEP_STILL_OK, + LOCKDEP_NOW_UNRELIABLE +}; +extern void add_taint(unsigned flag, enum lockdep_ok); +extern int test_taint(unsigned flag); +extern unsigned long get_taint(void); +extern int root_mountflags; + +extern bool early_boot_irqs_disabled; + + + + + +extern enum system_states { + SYSTEM_BOOTING, + SYSTEM_SCHEDULING, + SYSTEM_RUNNING, + 
SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, + SYSTEM_SUSPEND, +} system_state; +# 609 "./include/linux/kernel.h" +struct taint_flag { + char c_true; + char c_false; + bool module; +}; + +extern const struct taint_flag taint_flags[18]; + +extern const char hex_asc[]; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char *hex_byte_pack(char *buf, u8 byte) +{ + *buf++ = hex_asc[((byte) & 0xf0) >> 4]; + *buf++ = hex_asc[((byte) & 0x0f)]; + return buf; +} + +extern const char hex_asc_upper[]; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char *hex_byte_pack_upper(char *buf, u8 byte) +{ + *buf++ = hex_asc_upper[((byte) & 0xf0) >> 4]; + *buf++ = hex_asc_upper[((byte) & 0x0f)]; + return buf; +} + +extern int hex_to_bin(char ch); +extern int __attribute__((__warn_unused_result__)) hex2bin(u8 *dst, const char *src, size_t count); +extern char *bin2hex(char *dst, const void *src, size_t count); + +bool mac_pton(const char *s, u8 *mac); +# 665 "./include/linux/kernel.h" +enum ftrace_dump_mode { + DUMP_NONE, + DUMP_ALL, + DUMP_ORIG, +}; + + +void tracing_on(void); +void tracing_off(void); +int tracing_is_on(void); +void tracing_snapshot(void); +void tracing_snapshot_alloc(void); + +extern void tracing_start(void); +extern void tracing_stop(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__format__(printf, 1, 2))) +void ____trace_printk_check_format(const char *fmt, ...) +{ +} +# 744 "./include/linux/kernel.h" +extern __attribute__((__format__(printf, 2, 3))) +int __trace_bprintk(unsigned long ip, const char *fmt, ...); + +extern __attribute__((__format__(printf, 2, 3))) +int __trace_printk(unsigned long ip, const char *fmt, ...); +# 785 "./include/linux/kernel.h" +extern int __trace_bputs(unsigned long ip, const char *str); +extern int __trace_puts(unsigned long ip, const char *str, int size); + +extern void trace_dump_stack(int skip); +# 807 "./include/linux/kernel.h" +extern __attribute__((__format__(printf, 2, 0))) int +__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); + +extern __attribute__((__format__(printf, 2, 0))) int +__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); + +extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); +# 43 "fs/io_uring.c" 2 + +# 1 "./include/linux/errno.h" 1 + + + + +# 1 "./include/uapi/linux/errno.h" 1 +# 1 "./arch/x86/include/generated/uapi/asm/errno.h" 1 +# 1 "./include/uapi/asm-generic/errno.h" 1 + + + + +# 1 "./include/uapi/asm-generic/errno-base.h" 1 +# 6 "./include/uapi/asm-generic/errno.h" 2 +# 1 "./arch/x86/include/generated/uapi/asm/errno.h" 2 +# 1 "./include/uapi/linux/errno.h" 2 +# 6 "./include/linux/errno.h" 2 +# 45 "fs/io_uring.c" 2 +# 1 "./include/linux/syscalls.h" 1 +# 12 "./include/linux/syscalls.h" +struct __aio_sigset; +struct epoll_event; +struct iattr; +struct inode; +struct iocb; +struct io_event; +struct iovec; +struct __kernel_old_itimerval; +struct kexec_segment; +struct linux_dirent; +struct linux_dirent64; +struct list_head; +struct mmap_arg_struct; +struct msgbuf; +struct user_msghdr; +struct mmsghdr; +struct msqid_ds; +struct new_utsname; +struct nfsctl_arg; +struct __old_kernel_stat; +struct oldold_utsname; +struct old_utsname; +struct pollfd; +struct rlimit; +struct rlimit64; +struct rusage; +struct sched_param; +struct sched_attr; +struct sel_arg_struct; 
+struct semaphore; +struct sembuf; +struct shmid_ds; +struct sockaddr; +struct stat; +struct stat64; +struct statfs; +struct statfs64; +struct statx; +struct __sysctl_args; +struct sysinfo; +struct timespec; +struct __kernel_old_timeval; +struct __kernel_timex; +struct timezone; +struct tms; +struct utimbuf; +struct mq_attr; +struct compat_stat; +struct old_timeval32; +struct robust_list_head; +struct getcpu_cache; +struct old_linux_dirent; +struct perf_event_attr; +struct file_handle; +struct sigaltstack; +struct rseq; +union bpf_attr; +struct io_uring_params; +struct clone_args; +struct open_how; + + +# 1 "./include/uapi/linux/aio_abi.h" 1 +# 31 "./include/uapi/linux/aio_abi.h" +# 1 "./include/linux/fs.h" 1 + + + + + +# 1 "./include/linux/wait_bit.h" 1 + + + + + + + +# 1 "./include/linux/wait.h" 1 + + + + + + +# 1 "./include/linux/list.h" 1 + + + + + + +# 1 "./include/linux/poison.h" 1 +# 8 "./include/linux/list.h" 2 +# 33 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void INIT_LIST_HEAD(struct list_head *list) +{ + do { do { extern void __compiletime_assert_0(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list->next) == sizeof(char) || sizeof(list->next) == sizeof(short) || sizeof(list->next) == sizeof(int) || sizeof(list->next) == sizeof(long)) || sizeof(list->next) == sizeof(long long))) __compiletime_assert_0(); } while (0); do { *(volatile typeof(list->next) *)&(list->next) = (list); } while (0); } while (0); + list->prev = list; +} + + +extern bool __list_add_valid(struct list_head *new, + struct list_head *prev, + struct list_head *next); +extern bool __list_del_entry_valid(struct list_head *entry); +# 63 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + if (!__list_add_valid(new, prev, next)) + return; + + next->prev = new; + new->next = next; + new->prev = prev; + do { do { extern void __compiletime_assert_1(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_1(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (new); } while (0); } while (0); +} +# 84 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} +# 98 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} +# 110 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_del(struct list_head * prev, struct list_head * next) +{ + next->prev = prev; + do { do { extern void __compiletime_assert_2(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == 
sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_2(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (next); } while (0); } while (0); +} +# 124 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_del_clearprev(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->prev = ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_del_entry(struct list_head *entry) +{ + if (!__list_del_entry_valid(entry)) + return; + + __list_del(entry->prev, entry->next); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_del(struct list_head *entry) +{ + __list_del_entry(entry); + entry->next = ((void *) 0x100 + (0xdead000000000000UL)); + entry->prev = ((void *) 0x122 + (0xdead000000000000UL)); +} +# 158 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} +# 174 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_swap(struct list_head *entry1, + struct list_head *entry2) +{ + struct list_head *pos = entry2->prev; + + list_del(entry2); + list_replace(entry1, entry2); + if (pos == entry1) + pos = entry2; + list_add(entry1, pos); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_del_init(struct list_head *entry) +{ + __list_del_entry(entry); + INIT_LIST_HEAD(entry); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_move(struct list_head *list, struct list_head *head) +{ + __list_del_entry(list); + list_add(list, head); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del_entry(list); + list_add_tail(list, head); +} +# 240 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_bulk_move_tail(struct list_head *head, + struct list_head *first, + struct list_head *last) +{ + first->prev->next = last->next; + last->next->prev = first->prev; + + head->prev->next = first; + first->prev = head->prev; + + last->next = head; + head->prev = last; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int list_is_first(const struct list_head *list, + const struct list_head *head) +{ + return list->prev == head; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int list_empty(const struct list_head *head) +{ + return ({ do { extern void __compiletime_assert_3(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(head->next) == sizeof(char) || sizeof(head->next) == sizeof(short) || sizeof(head->next) == sizeof(int) || sizeof(head->next) == sizeof(long)) || sizeof(head->next) == sizeof(long long))) __compiletime_assert_3(); } while (0); ({ typeof( _Generic((head->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head->next))) __x = (*(const volatile typeof( _Generic((head->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head->next))) *)&(head->next)); do { } while (0); (typeof(head->next))__x; }); }) == head; +} +# 298 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int list_empty_careful(const struct list_head *head) +{ + struct list_head *next = head->next; + return (next == head) && (next == head->prev); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_rotate_left(struct list_head *head) +{ + struct list_head *first; + + if (!list_empty(head)) { + first = head->next; + list_move_tail(first, head); + } +} +# 325 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_rotate_to_front(struct list_head *list, + struct list_head *head) +{ + + + + + + list_move_tail(head, list); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int list_is_singular(const struct list_head *head) +{ + return !list_empty(head) && (head->next == head->prev); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} +# 371 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + 
__list_cut_position(list, head, entry); +} +# 399 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_cut_before(struct list_head *list, + struct list_head *head, + struct list_head *entry) +{ + if (head->next == entry) { + INIT_LIST_HEAD(list); + return; + } + list->next = head->next; + list->next->prev = list; + list->prev = entry->prev; + list->prev->next = list; + head->next = entry; + entry->prev = head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __list_splice(const struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice(const struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head, head->next); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); +} +# 460 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} +# 477 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); + INIT_LIST_HEAD(list); + } +} +# 765 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = ((void *)0); + h->pprev = ((void *)0); +} +# 779 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hlist_unhashed(const struct hlist_node *h) +{ + return !h->pprev; +} +# 792 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hlist_unhashed_lockless(const struct hlist_node *h) +{ + return !({ do { extern void __compiletime_assert_4(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(h->pprev) == sizeof(char) || sizeof(h->pprev) == sizeof(short) || sizeof(h->pprev) == sizeof(int) || sizeof(h->pprev) == sizeof(long)) || sizeof(h->pprev) == sizeof(long long))) __compiletime_assert_4(); } while (0); ({ typeof( _Generic((h->pprev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->pprev))) __x = (*(const 
volatile typeof( _Generic((h->pprev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->pprev))) *)&(h->pprev)); do { } while (0); (typeof(h->pprev))__x; }); }); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hlist_empty(const struct hlist_head *h) +{ + return !({ do { extern void __compiletime_assert_5(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(h->first) == sizeof(char) || sizeof(h->first) == sizeof(short) || sizeof(h->first) == sizeof(int) || sizeof(h->first) == sizeof(long)) || sizeof(h->first) == sizeof(long long))) __compiletime_assert_5(); } while (0); ({ typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->first))) __x = (*(const volatile typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->first))) *)&(h->first)); do { } while (0); (typeof(h->first))__x; }); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + + do { do { extern void __compiletime_assert_6(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*pprev) == sizeof(char) || sizeof(*pprev) == sizeof(short) || sizeof(*pprev) == sizeof(int) || sizeof(*pprev) == sizeof(long)) || sizeof(*pprev) == sizeof(long long))) __compiletime_assert_6(); } while (0); do { *(volatile typeof(*pprev) *)&(*pprev) = (next); } while (0); } while (0); + if (next) + do { do { extern void __compiletime_assert_7(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(next->pprev) == sizeof(char) || sizeof(next->pprev) == sizeof(short) || sizeof(next->pprev) == sizeof(int) || sizeof(next->pprev) == sizeof(long)) || sizeof(next->pprev) == sizeof(long long))) __compiletime_assert_7(); } while (0); do { *(volatile typeof(next->pprev) *)&(next->pprev) = (pprev); } while (0); } while (0); +} +# 823 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = ((void *) 0x100 + (0xdead000000000000UL)); + n->pprev = ((void *) 0x122 + (0xdead000000000000UL)); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_del_init(struct hlist_node *n) +{ + if 
(!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} +# 852 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + do { do { extern void __compiletime_assert_8(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->next) == sizeof(char) || sizeof(n->next) == sizeof(short) || sizeof(n->next) == sizeof(int) || sizeof(n->next) == sizeof(long)) || sizeof(n->next) == sizeof(long long))) __compiletime_assert_8(); } while (0); do { *(volatile typeof(n->next) *)&(n->next) = (first); } while (0); } while (0); + if (first) + do { do { extern void __compiletime_assert_9(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(first->pprev) == sizeof(char) || sizeof(first->pprev) == sizeof(short) || sizeof(first->pprev) == sizeof(int) || sizeof(first->pprev) == sizeof(long)) || sizeof(first->pprev) == sizeof(long long))) __compiletime_assert_9(); } while (0); do { *(volatile typeof(first->pprev) *)&(first->pprev) = (&n->next); } while (0); } while (0); + do { do { extern void __compiletime_assert_10(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(h->first) == sizeof(char) || sizeof(h->first) == sizeof(short) || sizeof(h->first) == sizeof(int) || sizeof(h->first) == sizeof(long)) || sizeof(h->first) == sizeof(long long))) __compiletime_assert_10(); } while (0); do { *(volatile typeof(h->first) *)&(h->first) = (n); } while (0); } while (0); + do { do { extern void __compiletime_assert_11(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_11(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&h->first); } while (0); } while (0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + do { do { extern void __compiletime_assert_12(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_12(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (next->pprev); } while (0); } while (0); + do { do { extern void __compiletime_assert_13(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->next) == sizeof(char) || sizeof(n->next) == sizeof(short) || sizeof(n->next) == sizeof(int) || sizeof(n->next) == sizeof(long)) || sizeof(n->next) == sizeof(long long))) __compiletime_assert_13(); } while (0); do { *(volatile typeof(n->next) *)&(n->next) = (next); } while (0); } while (0); + do { do { extern void __compiletime_assert_14(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(next->pprev) == sizeof(char) || sizeof(next->pprev) == sizeof(short) || sizeof(next->pprev) == sizeof(int) || sizeof(next->pprev) == sizeof(long)) 
|| sizeof(next->pprev) == sizeof(long long))) __compiletime_assert_14(); } while (0); do { *(volatile typeof(next->pprev) *)&(next->pprev) = (&n->next); } while (0); } while (0); + do { do { extern void __compiletime_assert_15(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*(n->pprev)) == sizeof(char) || sizeof(*(n->pprev)) == sizeof(short) || sizeof(*(n->pprev)) == sizeof(int) || sizeof(*(n->pprev)) == sizeof(long)) || sizeof(*(n->pprev)) == sizeof(long long))) __compiletime_assert_15(); } while (0); do { *(volatile typeof(*(n->pprev)) *)&(*(n->pprev)) = (n); } while (0); } while (0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_behind(struct hlist_node *n, + struct hlist_node *prev) +{ + do { do { extern void __compiletime_assert_16(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->next) == sizeof(char) || sizeof(n->next) == sizeof(short) || sizeof(n->next) == sizeof(int) || sizeof(n->next) == sizeof(long)) || sizeof(n->next) == sizeof(long long))) __compiletime_assert_16(); } while (0); do { *(volatile typeof(n->next) *)&(n->next) = (prev->next); } while (0); } while (0); + do { do { extern void __compiletime_assert_17(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_17(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (n); } while (0); } while (0); + do { do { extern void __compiletime_assert_18(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_18(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&prev->next); } while (0); } while (0); + + if (n->next) + do { do { extern void __compiletime_assert_19(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->next->pprev) == sizeof(char) || sizeof(n->next->pprev) == sizeof(short) || sizeof(n->next->pprev) == sizeof(int) || sizeof(n->next->pprev) == sizeof(long)) || sizeof(n->next->pprev) == sizeof(long long))) __compiletime_assert_19(); } while (0); do { *(volatile typeof(n->next->pprev) *)&(n->next->pprev) = (&n->next); } while (0); } while (0); +} +# 900 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_fake(struct hlist_node *n) +{ + n->pprev = &n->next; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hlist_fake(struct hlist_node *h) +{ + return h->pprev == &h->next; +} +# 922 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) +{ + return !n->next && n->pprev == &h->first; +} +# 936 "./include/linux/list.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void hlist_move_list(struct hlist_head *old, + struct hlist_head *new) +{ + new->first = old->first; + if (new->first) + new->first->pprev = &new->first; + old->first = ((void *)0); +} +# 8 "./include/linux/wait.h" 2 + +# 1 "./include/linux/spinlock.h" 1 +# 51 "./include/linux/spinlock.h" +# 1 "./include/linux/preempt.h" 1 +# 78 "./include/linux/preempt.h" +# 1 "./arch/x86/include/asm/preempt.h" 1 + + + + + +# 1 "./arch/x86/include/asm/percpu.h" 1 +# 88 "./arch/x86/include/asm/percpu.h" +extern void __bad_percpu_size(void); +# 524 "./arch/x86/include/asm/percpu.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool x86_this_cpu_constant_test_bit(unsigned int nr, + const unsigned long *addr) +{ + unsigned long *a = + (unsigned long *)addr + nr / 64; + + + return ((1UL << (nr % 64)) & ({ typeof(*a) pfo_ret__; switch (sizeof(*a)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (*a)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; default: __bad_percpu_size(); } pfo_ret__; })) != 0; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool x86_this_cpu_variable_test_bit(int nr, + const unsigned long *addr) +{ + bool oldbit; + + asm volatile("btl ""%%""gs"":" "%" "2"",%1" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (oldbit) + : "m" (*(unsigned long *)addr), "Ir" (nr)); + + return oldbit; +} + + + + + + + +# 1 "./include/asm-generic/percpu.h" 1 + + + + + +# 1 "./include/linux/threads.h" 1 +# 7 "./include/asm-generic/percpu.h" 2 +# 1 "./include/linux/percpu-defs.h" 1 +# 308 "./include/linux/percpu-defs.h" +extern void __bad_size_call_parameter(void); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __this_cpu_preempt_check(const char *op) { } +# 8 "./include/asm-generic/percpu.h" 2 +# 19 "./include/asm-generic/percpu.h" +extern unsigned long __per_cpu_offset[8192]; +# 48 "./include/asm-generic/percpu.h" +extern void setup_per_cpu_areas(void); +# 557 "./arch/x86/include/asm/percpu.h" 2 + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_this_cpu_off; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(unsigned long) this_cpu_off; +# 7 "./arch/x86/include/asm/preempt.h" 2 +# 1 "./include/linux/thread_info.h" 1 +# 12 "./include/linux/thread_info.h" +# 1 "./include/linux/bug.h" 1 + + + + +# 1 "./arch/x86/include/asm/bug.h" 1 +# 86 "./arch/x86/include/asm/bug.h" +# 1 "./include/asm-generic/bug.h" 1 +# 24 "./include/asm-generic/bug.h" +struct bug_entry { + + + + signed int bug_addr_disp; + + + + + + signed int file_disp; + + unsigned short line; + + unsigned short flags; +}; +# 92 "./include/asm-generic/bug.h" +extern __attribute__((__format__(printf, 1, 2))) void __warn_printk(const char *fmt, ...); +# 110 "./include/asm-generic/bug.h" +struct warn_args; +struct pt_regs; + +void __warn(const char *file, int line, void *caller, unsigned taint, + struct pt_regs *regs, struct warn_args *args); +# 87 "./arch/x86/include/asm/bug.h" 2 +# 6 "./include/linux/bug.h" 2 + + + +enum bug_trap_type { + BUG_TRAP_TYPE_NONE 
= 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; + +struct pt_regs; +# 34 "./include/linux/bug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_warning_bug(const struct bug_entry *bug) +{ + return bug->flags & (1 << 0); +} + +struct bug_entry *find_bug(unsigned long bugaddr); + +enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); + + +int is_valid_bugaddr(unsigned long addr); + +void generic_bug_clear_once(void); +# 70 "./include/linux/bug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) bool check_data_corruption(bool v) { return v; } +# 13 "./include/linux/thread_info.h" 2 +# 1 "./include/linux/restart_block.h" 1 +# 10 "./include/linux/restart_block.h" +# 1 "./include/linux/time64.h" 1 + + + + +# 1 "./include/linux/math64.h" 1 + + + + + +# 1 "./include/vdso/math64.h" 1 + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u32 +__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + u32 ret = 0; + + while (dividend >= divisor) { + + + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; +} +# 7 "./include/linux/math64.h" 2 +# 25 "./include/linux/math64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} +# 39 "./include/linux/math64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} +# 53 "./include/linux/math64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} +# 66 "./include/linux/math64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 div64_u64(u64 dividend, u64 divisor) +{ + return dividend / divisor; +} +# 78 "./include/linux/math64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 div64_s64(s64 dividend, s64 divisor) +{ + return dividend / divisor; +} +# 124 "./include/linux/math64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 div_u64(u64 dividend, u32 divisor) +{ + u32 remainder; + return div_u64_rem(dividend, divisor, &remainder); +} +# 137 "./include/linux/math64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 div_s64(s64 dividend, s32 divisor) +{ + s32 remainder; + return div_s64_rem(dividend, divisor, &remainder); +} + + +u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 mul_u32_u32(u32 a, u32 b) +{ + return (u64)a * b; +} + + + + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +# 6 "./include/linux/time64.h" 2 +# 1 "./include/vdso/time64.h" 1 +# 7 "./include/linux/time64.h" 2 + +typedef __s64 time64_t; +typedef __u64 timeu64_t; + +# 1 "./include/uapi/linux/time.h" 1 + + + + + +# 1 "./include/uapi/linux/time_types.h" 1 + + + + + + +struct __kernel_timespec { + __kernel_time64_t tv_sec; + long long tv_nsec; +}; + +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; + struct __kernel_timespec it_value; +}; +# 25 "./include/uapi/linux/time_types.h" +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_long_t tv_usec; +}; + + +struct __kernel_old_timespec { + __kernel_old_time_t tv_sec; + long tv_nsec; +}; + +struct __kernel_old_itimerval { + struct __kernel_old_timeval it_interval; + struct __kernel_old_timeval it_value; +}; + +struct __kernel_sock_timeval { + __s64 tv_sec; + __s64 tv_usec; +}; +# 7 "./include/uapi/linux/time.h" 2 +# 33 "./include/uapi/linux/time.h" +struct timezone { + int tz_minuteswest; + int tz_dsttime; +}; +# 12 "./include/linux/time64.h" 2 + +struct timespec64 { + time64_t tv_sec; + long tv_nsec; +}; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; +# 41 "./include/linux/time64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int timespec64_equal(const struct timespec64 *a, + const struct timespec64 *b) +{ + return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct timespec64 timespec64_add(struct timespec64 lhs, + struct timespec64 rhs) +{ + struct timespec64 ts_delta; + set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + return ts_delta; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct timespec64 timespec64_sub(struct timespec64 lhs, + struct timespec64 rhs) +{ + struct timespec64 ts_delta; + set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec, + lhs.tv_nsec - rhs.tv_nsec); + return ts_delta; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timespec64_valid(const struct timespec64 *ts) +{ + + if (ts->tv_sec < 0) + return false; + + if ((unsigned long)ts->tv_nsec >= 1000000000L) + return false; + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timespec64_valid_strict(const struct timespec64 *ts) +{ + if (!timespec64_valid(ts)) + 
return false; + + if ((unsigned long long)ts->tv_sec >= (((s64)~((u64)1 << 63)) / 1000000000L)) + return false; + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timespec64_valid_settod(const struct timespec64 *ts) +{ + if (!timespec64_valid(ts)) + return false; + + if ((unsigned long long)ts->tv_sec >= ((((s64)~((u64)1 << 63)) / 1000000000L) - (30LL * 365 * 24 *3600))) + return false; + return true; +} +# 125 "./include/linux/time64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 timespec64_to_ns(const struct timespec64 *ts) +{ + return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec; +} + + + + + + + +extern struct timespec64 ns_to_timespec64(const s64 nsec); +# 146 "./include/linux/time64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void timespec64_add_ns(struct timespec64 *a, u64 ns) +{ + a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns); + a->tv_nsec = ns; +} + + + + + +extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, + const struct timespec64 rhs); +# 11 "./include/linux/restart_block.h" 2 + +struct timespec; +struct old_timespec32; +struct pollfd; + +enum timespec_type { + TT_NONE = 0, + TT_NATIVE = 1, + TT_COMPAT = 2, +}; + + + + +struct restart_block { + long (*fn)(struct restart_block *); + union { + + struct { + u32 *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 *uaddr2; + } futex; + + struct { + clockid_t clockid; + enum timespec_type type; + union { + struct __kernel_timespec *rmtp; + struct old_timespec32 *compat_rmtp; + }; + u64 expires; + } nanosleep; + + struct { + struct pollfd *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +extern long do_no_restart_syscall(struct restart_block *parm); +# 14 "./include/linux/thread_info.h" 2 + + + + + + + +# 1 "./arch/x86/include/asm/current.h" 1 +# 9 "./arch/x86/include/asm/current.h" +struct task_struct; + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_current_task; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) current_task; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) struct task_struct *get_current(void) +{ + return ({ typeof(current_task) pfo_ret__; switch (sizeof(current_task)) { case 1: asm("mov" "b ""%%""gs"":" "%" "P1"",%0" : "=q" (pfo_ret__) : "p" (&(current_task))); break; case 2: asm("mov" "w ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 4: asm("mov" "l ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 8: asm("mov" "q ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; default: __bad_percpu_size(); } pfo_ret__; }); +} +# 22 "./include/linux/thread_info.h" 2 +# 31 "./include/linux/thread_info.h" +enum { + BAD_STACK = -1, + NOT_STACK = 0, + GOOD_FRAME, + GOOD_STACK, +}; + +# 1 "./arch/x86/include/asm/thread_info.h" 1 +# 12 "./arch/x86/include/asm/thread_info.h" +# 1 "./arch/x86/include/asm/page.h" 1 +# 9 "./arch/x86/include/asm/page.h" +# 1 "./arch/x86/include/asm/page_types.h" 1 + + + + + + +# 1 "./include/linux/mem_encrypt.h" 1 +# 17 "./include/linux/mem_encrypt.h" +# 1 
"./arch/x86/include/asm/mem_encrypt.h" 1 +# 17 "./arch/x86/include/asm/mem_encrypt.h" +# 1 "./arch/x86/include/uapi/asm/bootparam.h" 1 +# 43 "./arch/x86/include/uapi/asm/bootparam.h" +# 1 "./include/linux/screen_info.h" 1 + + + + +# 1 "./include/uapi/linux/screen_info.h" 1 +# 11 "./include/uapi/linux/screen_info.h" +struct screen_info { + __u8 orig_x; + __u8 orig_y; + __u16 ext_mem_k; + __u16 orig_video_page; + __u8 orig_video_mode; + __u8 orig_video_cols; + __u8 flags; + __u8 unused2; + __u16 orig_video_ega_bx; + __u16 unused3; + __u8 orig_video_lines; + __u8 orig_video_isVGA; + __u16 orig_video_points; + + + __u16 lfb_width; + __u16 lfb_height; + __u16 lfb_depth; + __u32 lfb_base; + __u32 lfb_size; + __u16 cl_magic, cl_offset; + __u16 lfb_linelength; + __u8 red_size; + __u8 red_pos; + __u8 green_size; + __u8 green_pos; + __u8 blue_size; + __u8 blue_pos; + __u8 rsvd_size; + __u8 rsvd_pos; + __u16 vesapm_seg; + __u16 vesapm_off; + __u16 pages; + __u16 vesa_attributes; + __u32 capabilities; + __u32 ext_lfb_base; + __u8 _reserved[2]; +} __attribute__((packed)); +# 6 "./include/linux/screen_info.h" 2 + +extern struct screen_info screen_info; +# 44 "./arch/x86/include/uapi/asm/bootparam.h" 2 +# 1 "./include/linux/apm_bios.h" 1 +# 9 "./include/linux/apm_bios.h" +# 1 "./include/uapi/linux/apm_bios.h" 1 +# 22 "./include/uapi/linux/apm_bios.h" +typedef unsigned short apm_event_t; +typedef unsigned short apm_eventinfo_t; + +struct apm_bios_info { + __u16 version; + __u16 cseg; + __u32 offset; + __u16 cseg_16; + __u16 dseg; + __u16 flags; + __u16 cseg_len; + __u16 cseg_16_len; + __u16 dseg_len; +}; +# 133 "./include/uapi/linux/apm_bios.h" +# 1 "./include/uapi/linux/ioctl.h" 1 + + + + +# 1 "./arch/x86/include/generated/uapi/asm/ioctl.h" 1 +# 1 "./include/asm-generic/ioctl.h" 1 + + + + +# 1 "./include/uapi/asm-generic/ioctl.h" 1 +# 6 "./include/asm-generic/ioctl.h" 2 + + + + + +extern unsigned int __invalid_size_argument_for_IOC; +# 1 "./arch/x86/include/generated/uapi/asm/ioctl.h" 2 +# 6 "./include/uapi/linux/ioctl.h" 2 +# 134 "./include/uapi/linux/apm_bios.h" 2 +# 10 "./include/linux/apm_bios.h" 2 +# 26 "./include/linux/apm_bios.h" +struct apm_info { + struct apm_bios_info bios; + unsigned short connection_version; + int get_power_status_broken; + int get_power_status_swabinminutes; + int allow_ints; + int forbid_idle; + int realmode_power_off; + int disabled; +}; +# 85 "./include/linux/apm_bios.h" +extern struct apm_info apm_info; +# 45 "./arch/x86/include/uapi/asm/bootparam.h" 2 +# 1 "./include/linux/edd.h" 1 +# 24 "./include/linux/edd.h" +# 1 "./include/uapi/linux/edd.h" 1 +# 72 "./include/uapi/linux/edd.h" +struct edd_device_params { + __u16 length; + __u16 info_flags; + __u32 num_default_cylinders; + __u32 num_default_heads; + __u32 sectors_per_track; + __u64 number_of_sectors; + __u16 bytes_per_sector; + __u32 dpte_ptr; + __u16 key; + __u8 device_path_info_length; + __u8 reserved2; + __u16 reserved3; + __u8 host_bus_type[4]; + __u8 interface_type[8]; + union { + struct { + __u16 base_address; + __u16 reserved1; + __u32 reserved2; + } __attribute__ ((packed)) isa; + struct { + __u8 bus; + __u8 slot; + __u8 function; + __u8 channel; + __u32 reserved; + } __attribute__ ((packed)) pci; + + struct { + __u64 reserved; + } __attribute__ ((packed)) ibnd; + struct { + __u64 reserved; + } __attribute__ ((packed)) xprs; + struct { + __u64 reserved; + } __attribute__ ((packed)) htpt; + struct { + __u64 reserved; + } __attribute__ ((packed)) unknown; + } interface_path; + union { + struct { + __u8 device; 
+ __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) ata; + struct { + __u8 device; + __u8 lun; + __u8 reserved1; + __u8 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) atapi; + struct { + __u16 id; + __u64 lun; + __u16 reserved1; + __u32 reserved2; + } __attribute__ ((packed)) scsi; + struct { + __u64 serial_number; + __u64 reserved; + } __attribute__ ((packed)) usb; + struct { + __u64 eui; + __u64 reserved; + } __attribute__ ((packed)) i1394; + struct { + __u64 wwid; + __u64 lun; + } __attribute__ ((packed)) fibre; + struct { + __u64 identity_tag; + __u64 reserved; + } __attribute__ ((packed)) i2o; + struct { + __u32 array_number; + __u32 reserved1; + __u64 reserved2; + } __attribute__ ((packed)) raid; + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) sata; + struct { + __u64 reserved1; + __u64 reserved2; + } __attribute__ ((packed)) unknown; + } device_path; + __u8 reserved4; + __u8 checksum; +} __attribute__ ((packed)); + +struct edd_info { + __u8 device; + __u8 version; + __u16 interface_support; + __u16 legacy_max_cylinder; + __u8 legacy_max_head; + __u8 legacy_sectors_per_track; + struct edd_device_params params; +} __attribute__ ((packed)); + +struct edd { + unsigned int mbr_signature[16]; + struct edd_info edd_info[6]; + unsigned char mbr_signature_nr; + unsigned char edd_info_nr; +}; +# 25 "./include/linux/edd.h" 2 + + +extern struct edd edd; +# 46 "./arch/x86/include/uapi/asm/bootparam.h" 2 +# 1 "./arch/x86/include/asm/ist.h" 1 +# 9 "./arch/x86/include/asm/ist.h" +# 1 "./arch/x86/include/uapi/asm/ist.h" 1 +# 23 "./arch/x86/include/uapi/asm/ist.h" +struct ist_info { + __u32 signature; + __u32 command; + __u32 event; + __u32 perf_level; +}; +# 10 "./arch/x86/include/asm/ist.h" 2 + + +extern struct ist_info ist_info; +# 47 "./arch/x86/include/uapi/asm/bootparam.h" 2 +# 1 "./include/video/edid.h" 1 + + + + +# 1 "./include/uapi/video/edid.h" 1 + + + + +struct edid_info { + unsigned char dummy[128]; +}; +# 6 "./include/video/edid.h" 2 + + +extern struct edid_info edid_info; +# 48 "./arch/x86/include/uapi/asm/bootparam.h" 2 + + +struct setup_data { + __u64 next; + __u32 type; + __u32 len; + __u8 data[0]; +}; + + +struct setup_indirect { + __u32 type; + __u32 reserved; + __u64 len; + __u64 addr; +}; + +struct setup_header { + __u8 setup_sects; + __u16 root_flags; + __u32 syssize; + __u16 ram_size; + __u16 vid_mode; + __u16 root_dev; + __u16 boot_flag; + __u16 jump; + __u32 header; + __u16 version; + __u32 realmode_swtch; + __u16 start_sys_seg; + __u16 kernel_version; + __u8 type_of_loader; + __u8 loadflags; + __u16 setup_move_size; + __u32 code32_start; + __u32 ramdisk_image; + __u32 ramdisk_size; + __u32 bootsect_kludge; + __u16 heap_end_ptr; + __u8 ext_loader_ver; + __u8 ext_loader_type; + __u32 cmd_line_ptr; + __u32 initrd_addr_max; + __u32 kernel_alignment; + __u8 relocatable_kernel; + __u8 min_alignment; + __u16 xloadflags; + __u32 cmdline_size; + __u32 hardware_subarch; + __u64 hardware_subarch_data; + __u32 payload_offset; + __u32 payload_length; + __u64 setup_data; + __u64 pref_address; + __u32 init_size; + __u32 handover_offset; + __u32 kernel_info_offset; +} __attribute__((packed)); + +struct sys_desc_table { + __u16 length; + __u8 table[14]; +}; + + +struct olpc_ofw_header { + __u32 ofw_magic; + __u32 ofw_version; + __u32 cif_handler; + __u32 irq_desc_table; +} __attribute__((packed)); + +struct efi_info { + __u32 
efi_loader_signature; + __u32 efi_systab; + __u32 efi_memdesc_size; + __u32 efi_memdesc_version; + __u32 efi_memmap; + __u32 efi_memmap_size; + __u32 efi_systab_hi; + __u32 efi_memmap_hi; +}; +# 140 "./arch/x86/include/uapi/asm/bootparam.h" +struct boot_e820_entry { + __u64 addr; + __u64 size; + __u32 type; +} __attribute__((packed)); +# 155 "./arch/x86/include/uapi/asm/bootparam.h" +struct jailhouse_setup_data { + struct { + __u16 version; + __u16 compatible_version; + } __attribute__((packed)) hdr; + struct { + __u16 pm_timer_address; + __u16 num_cpus; + __u64 pci_mmconfig_base; + __u32 tsc_khz; + __u32 apic_khz; + __u8 standard_ioapic; + __u8 cpu_ids[255]; + } __attribute__((packed)) v1; + struct { + __u32 flags; + } __attribute__((packed)) v2; +} __attribute__((packed)); + + +struct boot_params { + struct screen_info screen_info; + struct apm_bios_info apm_bios_info; + __u8 _pad2[4]; + __u64 tboot_addr; + struct ist_info ist_info; + __u64 acpi_rsdp_addr; + __u8 _pad3[8]; + __u8 hd0_info[16]; + __u8 hd1_info[16]; + struct sys_desc_table sys_desc_table; + struct olpc_ofw_header olpc_ofw_header; + __u32 ext_ramdisk_image; + __u32 ext_ramdisk_size; + __u32 ext_cmd_line_ptr; + __u8 _pad4[116]; + struct edid_info edid_info; + struct efi_info efi_info; + __u32 alt_mem_k; + __u32 scratch; + __u8 e820_entries; + __u8 eddbuf_entries; + __u8 edd_mbr_sig_buf_entries; + __u8 kbd_status; + __u8 secure_boot; + __u8 _pad5[2]; +# 212 "./arch/x86/include/uapi/asm/bootparam.h" + __u8 sentinel; + __u8 _pad6[1]; + struct setup_header hdr; + __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)]; + __u32 edd_mbr_sig_buffer[16]; + struct boot_e820_entry e820_table[128]; + __u8 _pad8[48]; + struct edd_info eddbuf[6]; + __u8 _pad9[276]; +} __attribute__((packed)); +# 262 "./arch/x86/include/uapi/asm/bootparam.h" +enum x86_hardware_subarch { + X86_SUBARCH_PC = 0, + X86_SUBARCH_LGUEST, + X86_SUBARCH_XEN, + X86_SUBARCH_INTEL_MID, + X86_SUBARCH_CE4100, + X86_NR_SUBARCHS, +}; +# 18 "./arch/x86/include/asm/mem_encrypt.h" 2 + + + +extern u64 sme_me_mask; +extern bool sev_enabled; + +void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr, + unsigned long decrypted_kernel_vaddr, + unsigned long kernel_len, + unsigned long encryption_wa, + unsigned long encryption_pgd); + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) sme_early_encrypt(resource_size_t paddr, + unsigned long size); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) sme_early_decrypt(resource_size_t paddr, + unsigned long size); + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) sme_map_bootdata(char *real_mode_data); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) sme_unmap_bootdata(char *real_mode_data); + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) sme_early_init(void); + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) sme_encrypt_kernel(struct boot_params *bp); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) sme_enable(struct boot_params *bp); + +int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) 
__attribute__((__indirect_branch__("keep"))) early_set_memory_decrypted(unsigned long vaddr, unsigned long size); +int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) early_set_memory_encrypted(unsigned long vaddr, unsigned long size); + + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) mem_encrypt_init(void); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) mem_encrypt_free_decrypted_mem(void); + +bool sme_active(void); +bool sev_active(void); +# 93 "./arch/x86/include/asm/mem_encrypt.h" +extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mem_encrypt_active(void) +{ + return sme_me_mask; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 sme_get_me_mask(void) +{ + return sme_me_mask; +} +# 18 "./include/linux/mem_encrypt.h" 2 +# 8 "./arch/x86/include/asm/page_types.h" 2 +# 46 "./arch/x86/include/asm/page_types.h" +# 1 "./arch/x86/include/asm/page_64_types.h" 1 + + + + + +# 1 "./arch/x86/include/asm/kaslr.h" 1 + + + + +unsigned long kaslr_get_random_long(const char *purpose); + + +void kernel_randomize_memory(void); +void init_trampoline_kaslr(void); +# 7 "./arch/x86/include/asm/page_64_types.h" 2 +# 47 "./arch/x86/include/asm/page_types.h" 2 +# 56 "./arch/x86/include/asm/page_types.h" +extern phys_addr_t physical_mask; + + + + + +extern int devmem_is_allowed(unsigned long pagenr); + +extern unsigned long max_low_pfn_mapped; +extern unsigned long max_pfn_mapped; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) phys_addr_t get_max_mapped(void) +{ + return (phys_addr_t)max_pfn_mapped << 12; +} + +bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn); + +extern void initmem_init(void); +# 10 "./arch/x86/include/asm/page.h" 2 + + +# 1 "./arch/x86/include/asm/page_64.h" 1 +# 11 "./arch/x86/include/asm/page_64.h" +extern unsigned long max_pfn; +extern unsigned long phys_base; + +extern unsigned long page_offset_base; +extern unsigned long vmalloc_base; +extern unsigned long vmemmap_base; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __phys_addr_nodebug(unsigned long x) +{ + unsigned long y = x - (0xffffffff80000000UL); + + + x = y + ((x > y) ? 
phys_base : ((0xffffffff80000000UL) - ((unsigned long)page_offset_base))); + + return x; +} + + +extern unsigned long __phys_addr(unsigned long); +extern unsigned long __phys_addr_symbol(unsigned long); +# 43 "./arch/x86/include/asm/page_64.h" +void clear_page_orig(void *page); +void clear_page_rep(void *page); +void clear_page_erms(void *page); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_page(void *page) +{ + asm volatile ("# ALT: oldinstr2\n" "661:\n\t" "call %P[old]" "\n662:\n" "# ALT: padding2\n" ".skip -((" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")) > 0) * " "(" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")), 0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 3*32+16)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" " .long 661b - .\n" " .long " "664""2""f - .\n" " .word " "( 9*32+ 9)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""2""f-""664""2""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "call %P[new1]" "\n" "665""1" ":\n" "# ALT: replacement " "2" "\n" "664""2"":\n\t" "call %P[new2]" "\n" "665""2" ":\n" ".popsection\n" : "=D" (page), "+r" (current_stack_pointer) : [old] "i" (clear_page_orig), [new1] "i" (clear_page_rep), [new2] "i" (clear_page_erms), "0" (page) : "cc", "memory", "rax", "rcx") + + + + + ; +} + +void copy_page(void *to, void *from); +# 13 "./arch/x86/include/asm/page.h" 2 + + + + + + +struct page; + +# 1 "./include/linux/range.h" 1 + + + + +struct range { + u64 start; + u64 end; +}; + +int add_range(struct range *range, int az, int nr_range, + u64 start, u64 end); + + +int add_range_with_merge(struct range *range, int az, int nr_range, + u64 start, u64 end); + +void subtract_range(struct range *range, int az, u64 start, u64 end); + +int clean_sort_range(struct range *range, int az); + +void sort_range(struct range *range, int nr_range); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) resource_size_t cap_resource(u64 val) +{ + if (val > ((resource_size_t)~0)) + return ((resource_size_t)~0); + + return val; +} +# 22 "./arch/x86/include/asm/page.h" 2 +extern struct range pfn_mapped[]; +extern int nr_pfn_mapped; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_user_page(void *page, unsigned long vaddr, + struct page *pg) +{ + clear_page(page); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void copy_user_page(void *to, void *from, unsigned long vaddr, + struct page *topage) +{ + copy_page(to, from); +} +# 71 "./arch/x86/include/asm/page.h" +extern bool __virt_addr_valid(unsigned long kaddr); + + + + +# 1 "./include/asm-generic/memory_model.h" 1 + + + + +# 1 "./include/linux/pfn.h" 1 +# 13 "./include/linux/pfn.h" +typedef struct { + u64 val; +} pfn_t; +# 6 "./include/asm-generic/memory_model.h" 2 +# 77 "./arch/x86/include/asm/page.h" 2 +# 1 
"./include/asm-generic/getorder.h" 1 +# 29 "./include/asm-generic/getorder.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__const__)) int get_order(unsigned long size) +{ + if (__builtin_constant_p(size)) { + if (!size) + return 64 - 12; + + if (size < (1UL << 12)) + return 0; + + return ( __builtin_constant_p((size) - 1) ? ( __builtin_constant_p((size) - 1) ? ( ((size) - 1) < 2 ? 0 : ((size) - 1) & (1ULL << 63) ? 63 : ((size) - 1) & (1ULL << 62) ? 62 : ((size) - 1) & (1ULL << 61) ? 61 : ((size) - 1) & (1ULL << 60) ? 60 : ((size) - 1) & (1ULL << 59) ? 59 : ((size) - 1) & (1ULL << 58) ? 58 : ((size) - 1) & (1ULL << 57) ? 57 : ((size) - 1) & (1ULL << 56) ? 56 : ((size) - 1) & (1ULL << 55) ? 55 : ((size) - 1) & (1ULL << 54) ? 54 : ((size) - 1) & (1ULL << 53) ? 53 : ((size) - 1) & (1ULL << 52) ? 52 : ((size) - 1) & (1ULL << 51) ? 51 : ((size) - 1) & (1ULL << 50) ? 50 : ((size) - 1) & (1ULL << 49) ? 49 : ((size) - 1) & (1ULL << 48) ? 48 : ((size) - 1) & (1ULL << 47) ? 47 : ((size) - 1) & (1ULL << 46) ? 46 : ((size) - 1) & (1ULL << 45) ? 45 : ((size) - 1) & (1ULL << 44) ? 44 : ((size) - 1) & (1ULL << 43) ? 43 : ((size) - 1) & (1ULL << 42) ? 42 : ((size) - 1) & (1ULL << 41) ? 41 : ((size) - 1) & (1ULL << 40) ? 40 : ((size) - 1) & (1ULL << 39) ? 39 : ((size) - 1) & (1ULL << 38) ? 38 : ((size) - 1) & (1ULL << 37) ? 37 : ((size) - 1) & (1ULL << 36) ? 36 : ((size) - 1) & (1ULL << 35) ? 35 : ((size) - 1) & (1ULL << 34) ? 34 : ((size) - 1) & (1ULL << 33) ? 33 : ((size) - 1) & (1ULL << 32) ? 32 : ((size) - 1) & (1ULL << 31) ? 31 : ((size) - 1) & (1ULL << 30) ? 30 : ((size) - 1) & (1ULL << 29) ? 29 : ((size) - 1) & (1ULL << 28) ? 28 : ((size) - 1) & (1ULL << 27) ? 27 : ((size) - 1) & (1ULL << 26) ? 26 : ((size) - 1) & (1ULL << 25) ? 25 : ((size) - 1) & (1ULL << 24) ? 24 : ((size) - 1) & (1ULL << 23) ? 23 : ((size) - 1) & (1ULL << 22) ? 22 : ((size) - 1) & (1ULL << 21) ? 21 : ((size) - 1) & (1ULL << 20) ? 20 : ((size) - 1) & (1ULL << 19) ? 19 : ((size) - 1) & (1ULL << 18) ? 18 : ((size) - 1) & (1ULL << 17) ? 17 : ((size) - 1) & (1ULL << 16) ? 16 : ((size) - 1) & (1ULL << 15) ? 15 : ((size) - 1) & (1ULL << 14) ? 14 : ((size) - 1) & (1ULL << 13) ? 13 : ((size) - 1) & (1ULL << 12) ? 12 : ((size) - 1) & (1ULL << 11) ? 11 : ((size) - 1) & (1ULL << 10) ? 10 : ((size) - 1) & (1ULL << 9) ? 9 : ((size) - 1) & (1ULL << 8) ? 8 : ((size) - 1) & (1ULL << 7) ? 7 : ((size) - 1) & (1ULL << 6) ? 6 : ((size) - 1) & (1ULL << 5) ? 5 : ((size) - 1) & (1ULL << 4) ? 4 : ((size) - 1) & (1ULL << 3) ? 3 : ((size) - 1) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof((size) - 1) <= 4) ? 
__ilog2_u32((size) - 1) : __ilog2_u64((size) - 1) ) - 12 + 1; + } + + size--; + size >>= 12; + + + + return fls64(size); + +} +# 78 "./arch/x86/include/asm/page.h" 2 +# 13 "./arch/x86/include/asm/thread_info.h" 2 + +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 15 "./arch/x86/include/asm/thread_info.h" 2 +# 52 "./arch/x86/include/asm/thread_info.h" +struct task_struct; +# 1 "./arch/x86/include/asm/cpufeature.h" 1 + + + + +# 1 "./arch/x86/include/asm/processor.h" 1 + + + + +# 1 "./arch/x86/include/asm/processor-flags.h" 1 + + + + +# 1 "./arch/x86/include/uapi/asm/processor-flags.h" 1 +# 6 "./arch/x86/include/asm/processor-flags.h" 2 +# 6 "./arch/x86/include/asm/processor.h" 2 + + +struct task_struct; +struct mm_struct; +struct io_bitmap; +struct vm86; + +# 1 "./arch/x86/include/asm/math_emu.h" 1 + + + + +# 1 "./arch/x86/include/asm/ptrace.h" 1 + + + + +# 1 "./arch/x86/include/asm/segment.h" 1 +# 249 "./arch/x86/include/asm/segment.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long vdso_encode_cpunode(int cpu, unsigned long node) +{ + return (node << 12) | cpu; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vdso_read_cpunode(unsigned *cpu, unsigned *node) +{ + unsigned int p; +# 266 "./arch/x86/include/asm/segment.h" + asm volatile ("# ALT: oldnstr\n" "661:\n\t" "lsl %[seg],%[p]" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "(16*32+22)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" ".byte 0xf3,0x0f,0xc7,0xf8" "\n" "665""1" ":\n" ".popsection\n" : [p] "=a" (p) : "i" (0), [seg] "r" ((15*8 + 3))) + + + ; + + if (cpu) + *cpu = (p & 0xfff); + if (node) + *node = (p >> 12); +} +# 301 "./arch/x86/include/asm/segment.h" +extern const char early_idt_handler_array[32][9]; +extern void early_ignore_irq(void); + + +extern const char xen_early_idt_handler_array[32][8]; +# 347 "./arch/x86/include/asm/segment.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __loadsegment_fs(unsigned short value) +{ + asm volatile(" \n" + "1: movw %0, %%fs \n" + "2: \n" + + " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long (" "ex_handler_clear_fs" ") - .\n" " .popsection\n" + + : : "rm" (value) : "memory"); +} +# 6 "./arch/x86/include/asm/ptrace.h" 2 + +# 1 "./arch/x86/include/uapi/asm/ptrace.h" 1 + + + + + +# 1 "./arch/x86/include/uapi/asm/ptrace-abi.h" 1 +# 7 "./arch/x86/include/uapi/asm/ptrace.h" 2 +# 8 "./arch/x86/include/asm/ptrace.h" 2 +# 56 "./arch/x86/include/asm/ptrace.h" +struct pt_regs { + + + + + unsigned long r15; + unsigned long r14; + unsigned long r13; + unsigned long r12; + unsigned long bp; + unsigned long bx; + + unsigned long r11; + unsigned long r10; + unsigned long r9; + unsigned long r8; + unsigned long ax; + unsigned long cx; + unsigned long dx; + unsigned long si; + unsigned long di; + + + + + unsigned long orig_ax; + + unsigned long ip; + unsigned long cs; + unsigned long flags; + unsigned long sp; + unsigned long 
ss; + +}; + + + + +# 1 "./arch/x86/include/asm/paravirt_types.h" 1 +# 43 "./arch/x86/include/asm/paravirt_types.h" +# 1 "./arch/x86/include/asm/desc_defs.h" 1 +# 16 "./arch/x86/include/asm/desc_defs.h" +struct desc_struct { + u16 limit0; + u16 base0; + u16 base1: 8, type: 4, s: 1, dpl: 2, p: 1; + u16 limit1: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; +} __attribute__((packed)); +# 40 "./arch/x86/include/asm/desc_defs.h" +enum { + GATE_INTERRUPT = 0xE, + GATE_TRAP = 0xF, + GATE_CALL = 0xC, + GATE_TASK = 0x5, +}; + +enum { + DESC_TSS = 0x9, + DESC_LDT = 0x2, + DESCTYPE_S = 0x10, +}; + + +struct ldttss_desc { + u16 limit0; + u16 base0; + + u16 base1 : 8, type : 5, dpl : 2, p : 1; + u16 limit1 : 4, zero0 : 3, g : 1, base2 : 8; + + u32 base3; + u32 zero1; + +} __attribute__((packed)); + +typedef struct ldttss_desc ldt_desc; +typedef struct ldttss_desc tss_desc; + +struct idt_bits { + u16 ist : 3, + zero : 5, + type : 5, + dpl : 2, + p : 1; +} __attribute__((packed)); + +struct gate_struct { + u16 offset_low; + u16 segment; + struct idt_bits bits; + u16 offset_middle; + + u32 offset_high; + u32 reserved; + +} __attribute__((packed)); + +typedef struct gate_struct gate_desc; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long gate_offset(const gate_desc *g) +{ + + return g->offset_low | ((unsigned long)g->offset_middle << 16) | + ((unsigned long) g->offset_high << 32); + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long gate_segment(const gate_desc *g) +{ + return g->segment; +} + +struct desc_ptr { + unsigned short size; + unsigned long address; +} __attribute__((packed)) ; +# 44 "./arch/x86/include/asm/paravirt_types.h" 2 +# 1 "./arch/x86/include/asm/kmap_types.h" 1 +# 9 "./arch/x86/include/asm/kmap_types.h" +# 1 "./include/asm-generic/kmap_types.h" 1 +# 10 "./arch/x86/include/asm/kmap_types.h" 2 +# 45 "./arch/x86/include/asm/paravirt_types.h" 2 +# 1 "./arch/x86/include/asm/pgtable_types.h" 1 +# 143 "./arch/x86/include/asm/pgtable_types.h" +enum page_cache_mode { + _PAGE_CACHE_MODE_WB = 0, + _PAGE_CACHE_MODE_WC = 1, + _PAGE_CACHE_MODE_UC_MINUS = 2, + _PAGE_CACHE_MODE_UC = 3, + _PAGE_CACHE_MODE_WT = 4, + _PAGE_CACHE_MODE_WP = 5, + + _PAGE_CACHE_MODE_NUM = 8 +}; +# 265 "./arch/x86/include/asm/pgtable_types.h" +# 1 "./arch/x86/include/asm/pgtable_64_types.h" 1 + + + + +# 1 "./arch/x86/include/asm/sparsemem.h" 1 +# 6 "./arch/x86/include/asm/pgtable_64_types.h" 2 +# 14 "./arch/x86/include/asm/pgtable_64_types.h" +typedef unsigned long pteval_t; +typedef unsigned long pmdval_t; +typedef unsigned long pudval_t; +typedef unsigned long p4dval_t; +typedef unsigned long pgdval_t; +typedef unsigned long pgprotval_t; + +typedef struct { pteval_t pte; } pte_t; + + +extern unsigned int __pgtable_l5_enabled; +# 43 "./arch/x86/include/asm/pgtable_64_types.h" +extern unsigned int pgdir_shift; +extern unsigned int ptrs_per_p4d; +# 266 "./arch/x86/include/asm/pgtable_types.h" 2 +# 281 "./arch/x86/include/asm/pgtable_types.h" +typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; + +typedef struct { pgdval_t pgd; } pgd_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t pgprot_nx(pgprot_t prot) +{ + return ((pgprot_t) { (((prot).pgprot) | (((pteval_t)(1)) << 63)) } ); +} +# 314 "./arch/x86/include/asm/pgtable_types.h" +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t native_make_pgd(pgdval_t val) +{ + return (pgd_t) { val & (~0ULL) }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgdval_t native_pgd_val(pgd_t pgd) +{ + return pgd.pgd & (~0ULL); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgdval_t pgd_flags(pgd_t pgd) +{ + return native_pgd_val(pgd) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask))); +} + + +typedef struct { p4dval_t p4d; } p4d_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t native_make_p4d(pudval_t val) +{ + return (p4d_t) { val }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t native_p4d_val(p4d_t p4d) +{ + return p4d.p4d; +} +# 356 "./arch/x86/include/asm/pgtable_types.h" +typedef struct { pudval_t pud; } pud_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t native_make_pud(pmdval_t val) +{ + return (pud_t) { val }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t native_pud_val(pud_t pud) +{ + return pud.pud; +} +# 382 "./arch/x86/include/asm/pgtable_types.h" +typedef struct { pmdval_t pmd; } pmd_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t native_make_pmd(pmdval_t val) +{ + return (pmd_t) { val }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmdval_t native_pmd_val(pmd_t pmd) +{ + return pmd.pmd; +} +# 407 "./arch/x86/include/asm/pgtable_types.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t p4d_pfn_mask(p4d_t p4d) +{ + + return ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t p4d_flags_mask(p4d_t p4d) +{ + return ~p4d_pfn_mask(p4d); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t p4d_flags(p4d_t p4d) +{ + return native_p4d_val(p4d) & p4d_flags_mask(p4d); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t pud_pfn_mask(pud_t pud) +{ + if (native_pud_val(pud) & (((pteval_t)(1)) << 7)) + return (((signed long)(~(((1UL) << 30)-1))) & physical_mask); + else + return ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t pud_flags_mask(pud_t pud) +{ + return ~pud_pfn_mask(pud); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t pud_flags(pud_t pud) +{ + return native_pud_val(pud) & pud_flags_mask(pud); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmdval_t pmd_pfn_mask(pmd_t pmd) +{ + if (native_pmd_val(pmd) & (((pteval_t)(1)) << 7)) + return (((signed long)(~(((1UL) << 
+[… several thousand machine-generated patch lines elided: preprocessor-expanded Linux kernel headers from a vendored `.i` translation unit used as test input (the original `+`-prefixed diff lines were fused together during extraction). The span covers verbatim expansions of arch/x86/include/asm/{pgtable_types,nospec-branch,paravirt_types,spinlock_types,ptrace,processor,msr,string_64,cmpxchg,atomic}.h, arch/x86/include/uapi/asm/sigcontext.h, include/asm-generic/{qspinlock_types,qrwlock_types}.h, and include/linux/{string,bitmap,cpumask,atomic}.h, among others; it contains no hand-written content. …]
if (__builtin_expect(!!(!success), 0)) *_old = __old; __builtin_expect(!!(success), 1); }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_xchg(atomic_t *v, int new) +{ + return ({ __typeof__ (*((&v->counter))) __ret = ((new)); switch (sizeof(*((&v->counter)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic_and(int i, atomic_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "andl %1,%0" + : "+m" (v->counter) + : "ir" (i) + : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_fetch_and(int i, atomic_t *v) +{ + int val = arch_atomic_read(v); + + do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i)); + + return val; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic_or(int i, atomic_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "orl %1,%0" + : "+m" (v->counter) + : "ir" (i) + : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_fetch_or(int i, atomic_t *v) +{ + int val = arch_atomic_read(v); + + do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i)); + + return val; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic_xor(int i, atomic_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xorl %1,%0" + : "+m" (v->counter) + : "ir" (i) + : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_atomic_fetch_xor(int i, atomic_t *v) +{ + int val = arch_atomic_read(v); + + do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i)); + + return val; +} + + + + + +# 1 "./arch/x86/include/asm/atomic64_64.h" 1 +# 20 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_read(const atomic64_t *v) +{ + return (*(const volatile typeof( _Generic(((v)->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: 
(signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((v)->counter))) *)&((v)->counter)); +} +# 32 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_set(atomic64_t *v, s64 i) +{ + do { *(volatile typeof(v->counter) *)&(v->counter) = (i); } while (0); +} +# 44 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic64_add(s64 i, atomic64_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "addq %1,%0" + : "=m" (v->counter) + : "er" (i), "m" (v->counter) : "memory"); +} +# 58 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_sub(s64 i, atomic64_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "subq %1,%0" + : "=m" (v->counter) + : "er" (i), "m" (v->counter) : "memory"); +} +# 74 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + return ({ bool c; asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "subq" " %[val], " "%[var]" "\n\t/* output condition code " "e" "*/\n" : [var] "+m" (v->counter), "=@cc" "e" (c) : [val] "er" (i) : "memory"); c; }); +} +# 86 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic64_inc(atomic64_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "incq %0" + : "=m" (v->counter) + : "m" (v->counter) : "memory"); +} +# 100 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void arch_atomic64_dec(atomic64_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "decq %0" + : "=m" (v->counter) + : "m" (v->counter) : "memory"); +} +# 116 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_atomic64_dec_and_test(atomic64_t *v) +{ + return ({ bool c; asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "decq" " " "%[var]" "\n\t/* output condition code " "e" "*/\n" : [var] "+m" (v->counter), "=@cc" "e" (c) : : "memory"); c; }); +} +# 130 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_atomic64_inc_and_test(atomic64_t *v) +{ + return ({ bool c; asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "incq" " " "%[var]" "\n\t/* output condition code " "e" "*/\n" : [var] "+m" (v->counter), 
"=@cc" "e" (c) : : "memory"); c; }); +} +# 145 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_atomic64_add_negative(s64 i, atomic64_t *v) +{ + return ({ bool c; asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "addq" " %[val], " "%[var]" "\n\t/* output condition code " "s" "*/\n" : [var] "+m" (v->counter), "=@cc" "s" (c) : [val] "er" (i) : "memory"); c; }); +} +# 158 "./arch/x86/include/asm/atomic64_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 arch_atomic64_add_return(s64 i, atomic64_t *v) +{ + return i + ({ __typeof__ (*(((&v->counter)))) __ret = (((i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_sub_return(s64 i, atomic64_t *v) +{ + return arch_atomic64_add_return(-i, v); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v) +{ + return ({ __typeof__ (*(((&v->counter)))) __ret = (((i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + return ({ __typeof__ (*(((&v->counter)))) __ret = (((-i))); switch (sizeof(*(((&v->counter))))) { case 1: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" 
".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "b %b0, %1\n" : "+q" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 2: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "w %w0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 4: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "l %0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; case 8: asm volatile (".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xadd" "q %q0, %1\n" : "+r" (__ret), "+m" (*(((&v->counter)))) : : "memory", "cc"); break; default: __xadd_wrong_size(); } __ret; }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + return ({ __typeof__(*((&v->counter))) __ret; __typeof__(*((&v->counter))) __old = ((old)); __typeof__(*((&v->counter))) __new = ((new)); switch ((sizeof(*(&v->counter)))) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 8: { volatile u64 *__ptr = (volatile u64 *)((&v->counter)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgq %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + return ({ bool success; __typeof__(((&v->counter))) _old = (__typeof__(((&v->counter))))(((old))); __typeof__(*(((&v->counter)))) __old = *_old; __typeof__(*(((&v->counter)))) __new = (((new))); switch ((sizeof(*(&v->counter)))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgb %[new], %[ptr]" "\n\t/* output condition code " "z" "*/\n" : "=@cc" "z" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "q" (__new) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgw %[new], %[ptr]" "\n\t/* output condition code " "z" "*/\n" : "=@cc" "z" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } case 4: { volatile u32 *__ptr = 
(volatile u32 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgl %[new], %[ptr]" "\n\t/* output condition code " "z" "*/\n" : "=@cc" "z" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } case 8: { volatile u64 *__ptr = (volatile u64 *)(((&v->counter))); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgq %[new], %[ptr]" "\n\t/* output condition code " "z" "*/\n" : "=@cc" "z" (success), [ptr] "+m" (*__ptr), [old] "+a" (__old) : [new] "r" (__new) : "memory"); break; } default: __cmpxchg_wrong_size(); } if (__builtin_expect(!!(!success), 0)) *_old = __old; __builtin_expect(!!(success), 1); }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_xchg(atomic64_t *v, s64 new) +{ + return ({ __typeof__ (*((&v->counter))) __ret = ((new)); switch (sizeof(*((&v->counter)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((&v->counter))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_and(s64 i, atomic64_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "andq %1,%0" + : "+m" (v->counter) + : "er" (i) + : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v) +{ + s64 val = arch_atomic64_read(v); + + do { + } while (!arch_atomic64_try_cmpxchg(v, &val, val & i)); + return val; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_or(s64 i, atomic64_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "orq %1,%0" + : "+m" (v->counter) + : "er" (i) + : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v) +{ + s64 val = arch_atomic64_read(v); + + do { + } while (!arch_atomic64_try_cmpxchg(v, &val, val | i)); + return val; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_atomic64_xor(s64 i, atomic64_t *v) +{ + asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "xorq %1,%0" + : "+m" (v->counter) + : "er" (i) + : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + s64 val = arch_atomic64_read(v); + + do { + } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i)); + return val; +} +# 272 "./arch/x86/include/asm/atomic.h" 2 +# 8 
"./include/linux/atomic.h" 2 +# 81 "./include/linux/atomic.h" +# 1 "./include/linux/atomic-arch-fallback.h" 1 +# 81 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_read_acquire(const atomic_t *v) +{ + return ({ typeof(*&(v)->counter) ___p1 = ({ do { extern void __compiletime_assert_20(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)) || sizeof(*&(v)->counter) == sizeof(long long))) __compiletime_assert_20(); } while (0); ({ typeof( _Generic((*&(v)->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&(v)->counter))) __x = (*(const volatile typeof( _Generic((*&(v)->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&(v)->counter))) *)&(*&(v)->counter)); do { } while (0); (typeof(*&(v)->counter))__x; }); }); do { extern void __compiletime_assert_21(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_21(); } while (0); __asm__ __volatile__("": : :"memory"); ___p1; }); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch_atomic_set_release(atomic_t *v, int i) +{ + do { do { extern void __compiletime_assert_22(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_22(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_23(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)) || sizeof(*&(v)->counter) == sizeof(long long))) __compiletime_assert_23(); } while (0); do { *(volatile typeof(*&(v)->counter) *)&(*&(v)->counter) = (i); } while (0); } while (0); } while (0); +} +# 283 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_inc_return(atomic_t *v) +{ + return arch_atomic_add_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + return arch_atomic_add_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_inc_return_release(atomic_t *v) +{ + return arch_atomic_add_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_inc_return_relaxed(atomic_t *v) +{ + return arch_atomic_add_return(1, v); +} +# 364 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_inc(atomic_t *v) +{ + return arch_atomic_fetch_add(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + return arch_atomic_fetch_add(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + return arch_atomic_fetch_add(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_inc_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_add(1, v); +} +# 454 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_dec_return(atomic_t *v) +{ + return arch_atomic_sub_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + return arch_atomic_sub_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_dec_return_release(atomic_t *v) +{ + return arch_atomic_sub_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_dec_return_relaxed(atomic_t *v) +{ + return arch_atomic_sub_return(1, v); +} +# 535 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_dec(atomic_t *v) +{ + return arch_atomic_fetch_sub(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + return arch_atomic_fetch_sub(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + return arch_atomic_fetch_sub(1, v); +} 
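+/*
+ * Note on the ordering-variant wrappers above and below: this is the
+ * expansion of include/linux/atomic-arch-fallback.h. Because the x86-64
+ * backend only supplies the fully ordered primitives, the _acquire,
+ * _release and _relaxed variants all fall back to the same full-barrier
+ * implementation -- each body here simply calls the base operation
+ * (e.g. arch_atomic_fetch_sub(1, v)).
+ */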
+ + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_dec_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_sub(1, v); +} +# 651 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch_atomic_andnot(int i, atomic_t *v) +{ + arch_atomic_and(~i, v); +} +# 667 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + return arch_atomic_fetch_and(~i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return arch_atomic_fetch_and(~i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return arch_atomic_fetch_and(~i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return arch_atomic_fetch_and(~i, v); +} +# 1085 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = arch_atomic_read(v); + + do { + if (__builtin_expect(!!(c == u), 0)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +# 1110 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_atomic_add_unless(atomic_t *v, int a, int u) +{ + return arch_atomic_fetch_add_unless(v, a, u) != u; +} +# 1126 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_atomic_inc_not_zero(atomic_t *v) +{ + return arch_atomic_add_unless(v, 1, 0); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_atomic_inc_unless_negative(atomic_t *v) +{ + int c = arch_atomic_read(v); + + do { + if (__builtin_expect(!!(c < 0), 0)) + return false; + } while (!arch_atomic_try_cmpxchg(v, &c, c + 1)); + + return true; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_atomic_dec_unless_positive(atomic_t *v) +{ + int c = arch_atomic_read(v); + + do { + if (__builtin_expect(!!(c > 0), 0)) + return false; + } while (!arch_atomic_try_cmpxchg(v, &c, c - 1)); + + return true; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int 
+arch_atomic_dec_if_positive(atomic_t *v) +{ + int dec, c = arch_atomic_read(v); + + do { + dec = c - 1; + if (__builtin_expect(!!(dec < 0), 0)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, dec)); + + return dec; +} +# 1188 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_read_acquire(const atomic64_t *v) +{ + return ({ typeof(*&(v)->counter) ___p1 = ({ do { extern void __compiletime_assert_24(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)) || sizeof(*&(v)->counter) == sizeof(long long))) __compiletime_assert_24(); } while (0); ({ typeof( _Generic((*&(v)->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&(v)->counter))) __x = (*(const volatile typeof( _Generic((*&(v)->counter), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&(v)->counter))) *)&(*&(v)->counter)); do { } while (0); (typeof(*&(v)->counter))__x; }); }); do { extern void __compiletime_assert_25(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_25(); } while (0); __asm__ __volatile__("": : :"memory"); ___p1; }); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch_atomic64_set_release(atomic64_t *v, s64 i) +{ + do { do { extern void __compiletime_assert_26(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)))) __compiletime_assert_26(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_27(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(v)->counter) == sizeof(char) || sizeof(*&(v)->counter) == sizeof(short) || sizeof(*&(v)->counter) == sizeof(int) || sizeof(*&(v)->counter) == sizeof(long)) || sizeof(*&(v)->counter) == sizeof(long long))) __compiletime_assert_27(); } while (0); do { *(volatile typeof(*&(v)->counter) *)&(*&(v)->counter) = (i); } while (0); } while (0); } while (0); +} +# 1390 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 
+arch_atomic64_inc_return(atomic64_t *v) +{ + return arch_atomic64_add_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + return arch_atomic64_add_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + return arch_atomic64_add_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_inc_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_add_return(1, v); +} +# 1471 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + return arch_atomic64_fetch_add(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_add(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + return arch_atomic64_fetch_add(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_add(1, v); +} +# 1561 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_dec_return(atomic64_t *v) +{ + return arch_atomic64_sub_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + return arch_atomic64_sub_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_dec_return_release(atomic64_t *v) +{ + return arch_atomic64_sub_return(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_dec_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_sub_return(1, v); +} +# 1642 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + return arch_atomic64_fetch_sub(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_sub(1, v); +} + + + + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + return arch_atomic64_fetch_sub(1, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_sub(1, v); +} +# 1758 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +arch_atomic64_andnot(s64 i, atomic64_t *v) +{ + arch_atomic64_and(~i, v); +} +# 1774 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and(~i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and(~i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and(~i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and(~i, v); +} +# 2192 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 c = arch_atomic64_read(v); + + do { + if (__builtin_expect(!!(c == u), 0)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +# 2217 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return arch_atomic64_fetch_add_unless(v, a, u) != u; +} +# 2233 "./include/linux/atomic-arch-fallback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_atomic64_inc_not_zero(atomic64_t *v) +{ + return arch_atomic64_add_unless(v, 1, 0); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_atomic64_inc_unless_negative(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (__builtin_expect(!!(c < 0), 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +arch_atomic64_dec_unless_positive(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + 
+ do { + if (__builtin_expect(!!(c > 0), 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +arch_atomic64_dec_if_positive(atomic64_t *v) +{ + s64 dec, c = arch_atomic64_read(v); + + do { + dec = c - 1; + if (__builtin_expect(!!(dec < 0), 0)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); + + return dec; +} +# 82 "./include/linux/atomic.h" 2 +# 1 "./include/asm-generic/atomic-instrumented.h" 1 +# 24 "./include/asm-generic/atomic-instrumented.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_read(const atomic_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_read(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_read_acquire(const atomic_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_read_acquire(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_set(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_set(v, i); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_set_release(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_set_release(v, i); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_add(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_add(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_add_return(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_add_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_add_return_acquire(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_add_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_add_return_release(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_add_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_add_return_relaxed(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_add_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_add(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_add(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_add_acquire(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_add(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_add_release(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_add(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_add_relaxed(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_add(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_sub(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_sub(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_sub_return(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_sub_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_sub_return_acquire(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_sub_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_sub_return_release(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_sub_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_sub_return_relaxed(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_sub_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_sub(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_sub_release(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_sub_relaxed(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_inc(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_inc(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_inc_return(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_inc_return(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_inc_return_acquire(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_inc_return_acquire(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_inc_return_release(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_inc_return_release(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_inc_return_relaxed(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_inc_return_relaxed(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_inc(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_inc(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_inc_acquire(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_acquire(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_inc_release(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_release(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_relaxed(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_dec(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_dec(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_dec_return(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_dec_return(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_dec_return_acquire(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_dec_return_acquire(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_dec_return_release(atomic_t *v) +{ + instrument_atomic_write(v, 
sizeof(*v)); + return arch_atomic_dec_return_release(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_dec_return_relaxed(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_dec_return_relaxed(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_dec(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_dec(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_dec_acquire(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_acquire(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_dec_release(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_release(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_relaxed(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_and(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_and(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_and(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_and_release(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_and_relaxed(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_andnot(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_andnot(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_andnot(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot(i, v); +} + + + + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_acquire(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_release(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_relaxed(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_or(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_or(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_or(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_or_release(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_or_relaxed(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_xor(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_xor(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_xor(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_xor_release(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_xor_relaxed(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_xchg(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_xchg_acquire(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_xchg_release(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_xchg_relaxed(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_cmpxchg(atomic_t *v, int old, int new) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + instrument_atomic_write(v, sizeof(*v)); + 
instrument_atomic_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_sub_and_test(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_sub_and_test(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_dec_and_test(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_dec_and_test(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_inc_and_test(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_inc_and_test(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_add_negative(int i, atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_add_negative(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_fetch_add_unless(v, a, u); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_add_unless(atomic_t *v, int a, int u) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_add_unless(v, a, u); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_inc_not_zero(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_inc_not_zero(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_inc_unless_negative(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_inc_unless_negative(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_dec_unless_positive(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_dec_unless_positive(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int +atomic_dec_if_positive(atomic_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic_dec_if_positive(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 
+atomic64_read(const atomic64_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic64_read(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_read_acquire(const atomic64_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic64_read_acquire(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_set(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_set(v, i); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_set_release(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_set_release(v, i); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_add(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_add(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_add_return(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_add_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_add_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_add_return_release(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_add_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_add_return_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_add_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_add(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) +{ + 
instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_sub(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_sub(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_sub_return_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_inc(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_inc(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_inc_return(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_inc_return(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return 
arch_atomic64_inc_return_acquire(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_inc_return_release(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_inc_return_release(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_inc_return_relaxed(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_inc(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_acquire(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_release(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_relaxed(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_dec(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_dec(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_dec_return(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_dec_return(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_dec_return_acquire(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_dec_return_release(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_dec_return_release(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_dec_return_relaxed(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_dec(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec(v); +} + + + + 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_acquire(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_release(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_relaxed(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_and(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_and(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_and(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_andnot(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_andnot(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_acquire(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_release(i, v); +} 
+ + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_or(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_or(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_or(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic64_xor(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_xor(i, v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} + + + + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_xchg_relaxed(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, 
old, new); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_sub_and_test(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_dec_and_test(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_dec_and_test(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_inc_and_test(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_inc_and_test(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_add_negative(s64 i, atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_add_negative(i, v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_unless(v, a, u); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_add_unless(v, a, u); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_inc_not_zero(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_inc_not_zero(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_inc_unless_negative(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_inc_unless_negative(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic64_dec_unless_positive(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_dec_unless_positive(v); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) s64 +atomic64_dec_if_positive(atomic64_t *v) +{ + instrument_atomic_write(v, sizeof(*v)); + return arch_atomic64_dec_if_positive(v); +} +# 83 "./include/linux/atomic.h" 2 + + + + +# 1 "./include/asm-generic/atomic-long.h" 1 +# 10 "./include/asm-generic/atomic-long.h" +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 11 "./include/asm-generic/atomic-long.h" 2 + + +typedef atomic64_t atomic_long_t; +# 26 "./include/asm-generic/atomic-long.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_read(const atomic_long_t *v) +{ + return atomic64_read(v); +} + 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_read_acquire(const atomic_long_t *v) +{ + return atomic64_read_acquire(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_set(atomic_long_t *v, long i) +{ + atomic64_set(v, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_set_release(atomic_long_t *v, long i) +{ + atomic64_set_release(v, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_add(long i, atomic_long_t *v) +{ + atomic64_add(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_add_return(long i, atomic_long_t *v) +{ + return atomic64_add_return(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return atomic64_add_return_acquire(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return atomic64_add_return_release(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return atomic64_add_return_relaxed(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return atomic64_fetch_add(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_acquire(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_release(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_add_relaxed(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_sub(long i, atomic_long_t *v) +{ + atomic64_sub(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + return atomic64_sub_return(i, v); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return atomic64_sub_return_acquire(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return atomic64_sub_return_release(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return atomic64_sub_return_relaxed(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_acquire(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_release(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_sub_relaxed(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_inc(atomic_long_t *v) +{ + atomic64_inc(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_inc_return(atomic_long_t *v) +{ + return atomic64_inc_return(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return atomic64_inc_return_acquire(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_inc_return_release(atomic_long_t *v) +{ + return atomic64_inc_return_release(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return atomic64_inc_return_relaxed(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_inc(atomic_long_t *v) +{ + return atomic64_fetch_inc(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return atomic64_fetch_inc_acquire(v); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return atomic64_fetch_inc_release(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return atomic64_fetch_inc_relaxed(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_dec(atomic_long_t *v) +{ + atomic64_dec(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_dec_return(atomic_long_t *v) +{ + return atomic64_dec_return(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return atomic64_dec_return_acquire(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_dec_return_release(atomic_long_t *v) +{ + return atomic64_dec_return_release(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return atomic64_dec_return_relaxed(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_dec(atomic_long_t *v) +{ + return atomic64_fetch_dec(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return atomic64_fetch_dec_acquire(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return atomic64_fetch_dec_release(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return atomic64_fetch_dec_relaxed(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_and(long i, atomic_long_t *v) +{ + atomic64_and(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return atomic64_fetch_and(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_acquire(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) 
long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_release(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_and_relaxed(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_andnot(long i, atomic_long_t *v) +{ + atomic64_andnot(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_acquire(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_release(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_andnot_relaxed(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_or(long i, atomic_long_t *v) +{ + atomic64_or(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return atomic64_fetch_or(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_acquire(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_release(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_or_relaxed(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +atomic_long_xor(long i, atomic_long_t *v) +{ + atomic64_xor(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long 
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_acquire(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_release(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return atomic64_fetch_xor_relaxed(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_xchg(atomic_long_t *v, long i) +{ + return atomic64_xchg(v, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return atomic64_xchg_acquire(v, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return atomic64_xchg_release(v, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return atomic64_xchg_relaxed(v, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg(v, old, new); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_acquire(v, old, new); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_release(v, old, new); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return atomic64_cmpxchg_relaxed(v, old, new); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg(v, (s64 *)old, new); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return 
atomic64_try_cmpxchg_release(v, (s64 *)old, new); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return atomic64_sub_and_test(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + return atomic64_dec_and_test(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + return atomic64_inc_and_test(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + return atomic64_add_negative(i, v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic64_fetch_add_unless(v, a, u); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return atomic64_add_unless(v, a, u); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + return atomic64_inc_not_zero(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return atomic64_inc_unless_negative(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return atomic64_dec_unless_positive(v); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + return atomic64_dec_if_positive(v); +} +# 88 "./include/linux/atomic.h" 2 +# 14 "./include/linux/cpumask.h" 2 + + + +typedef struct cpumask { unsigned long bits[(((8192) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; } cpumask_t; +# 39 "./include/linux/cpumask.h" +extern unsigned int nr_cpu_ids; +# 90 "./include/linux/cpumask.h" +extern struct cpumask __cpu_possible_mask; +extern struct cpumask __cpu_online_mask; +extern struct cpumask __cpu_present_mask; +extern struct cpumask __cpu_active_mask; + + + + + +extern atomic_t __num_online_cpus; +# 110 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int 
num_online_cpus(void) +{ + return atomic_read(&__num_online_cpus); +} +# 132 "./include/linux/cpumask.h" +extern cpumask_t cpus_booted_once_mask; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) +{ + + ({ int __ret_warn_on = !!(cpu >= bits); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (28)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/cpumask.h"), "i" (137), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (29)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (30)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_check(unsigned int cpu) +{ + cpu_max_bits_warn(cpu, nr_cpu_ids); + return cpu; +} +# 217 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_first(const struct cpumask *srcp) +{ + return find_first_bit(((srcp)->bits), nr_cpu_ids); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_last(const struct cpumask *srcp) +{ + return find_last_bit(((srcp)->bits), nr_cpu_ids); +} + +unsigned int cpumask_next(int n, const struct cpumask *srcp); +# 242 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) +{ + + if (n != -1) + cpumask_check(n); + return find_next_zero_bit(((srcp)->bits), nr_cpu_ids, n+1); +} + +int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); +int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); +unsigned int cpumask_local_spread(unsigned int i, int node); +int cpumask_any_and_distribute(const struct cpumask *src1p, + const struct cpumask *src2p); +# 280 "./include/linux/cpumask.h" +extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); +# 332 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +{ + set_bit(cpumask_check(cpu), ((dstp)->bits)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +{ + __set_bit(cpumask_check(cpu), ((dstp)->bits)); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp) +{ + clear_bit(cpumask_check(cpu), 
((dstp)->bits)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) +{ + __clear_bit(cpumask_check(cpu), ((dstp)->bits)); +} +# 365 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) +{ + return test_bit(cpumask_check(cpu), (((cpumask))->bits)); +} +# 379 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits)); +} +# 393 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) +{ + return test_and_clear_bit(cpumask_check(cpu), ((cpumask)->bits)); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_setall(struct cpumask *dstp) +{ + bitmap_fill(((dstp)->bits), nr_cpu_ids); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_clear(struct cpumask *dstp) +{ + bitmap_zero(((dstp)->bits), nr_cpu_ids); +} +# 424 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_and(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_and(((dstp)->bits), ((src1p)->bits), + ((src2p)->bits), nr_cpu_ids); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_or(((dstp)->bits), ((src1p)->bits), + ((src2p)->bits), nr_cpu_ids); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_xor(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + bitmap_xor(((dstp)->bits), ((src1p)->bits), + ((src2p)->bits), nr_cpu_ids); +} +# 467 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_andnot(struct cpumask *dstp, + const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_andnot(((dstp)->bits), ((src1p)->bits), + ((src2p)->bits), nr_cpu_ids); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_complement(struct cpumask *dstp, + const struct cpumask *srcp) +{ + bitmap_complement(((dstp)->bits), ((srcp)->bits), + nr_cpu_ids); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_equal(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_equal(((src1p)->bits), ((src2p)->bits), + nr_cpu_ids); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool 
cpumask_or_equal(const struct cpumask *src1p, + const struct cpumask *src2p, + const struct cpumask *src3p) +{ + return bitmap_or_equal(((src1p)->bits), ((src2p)->bits), + ((src3p)->bits), nr_cpu_ids); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_intersects(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_intersects(((src1p)->bits), ((src2p)->bits), + nr_cpu_ids); +} +# 532 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_subset(const struct cpumask *src1p, + const struct cpumask *src2p) +{ + return bitmap_subset(((src1p)->bits), ((src2p)->bits), + nr_cpu_ids); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_empty(const struct cpumask *srcp) +{ + return bitmap_empty(((srcp)->bits), nr_cpu_ids); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_full(const struct cpumask *srcp) +{ + return bitmap_full(((srcp)->bits), nr_cpu_ids); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_weight(const struct cpumask *srcp) +{ + return bitmap_weight(((srcp)->bits), nr_cpu_ids); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_shift_right(struct cpumask *dstp, + const struct cpumask *srcp, int n) +{ + bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n, + nr_cpu_ids); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_shift_left(struct cpumask *dstp, + const struct cpumask *srcp, int n) +{ + bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n, + nr_cpu_ids); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpumask_copy(struct cpumask *dstp, + const struct cpumask *srcp) +{ + bitmap_copy(((dstp)->bits), ((srcp)->bits), nr_cpu_ids); +} +# 643 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_parse_user(const char *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parse_user(buf, len, ((dstp)->bits), nr_cpu_ids); +} +# 657 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_parselist_user(const char *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parselist_user(buf, len, ((dstp)->bits), + nr_cpu_ids); +} +# 671 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpumask_parse(const char *buf, struct cpumask *dstp) +{ + return bitmap_parse(buf, (~0U), ((dstp)->bits), nr_cpu_ids); +} +# 683 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpulist_parse(const char *buf, struct cpumask *dstp) +{ + return bitmap_parselist(buf, ((dstp)->bits), nr_cpu_ids); +} + + + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpumask_size(void) +{ + return (((nr_cpu_ids) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8))) * sizeof(long); +} +# 737 "./include/linux/cpumask.h" +typedef struct cpumask *cpumask_var_t; + + + + +bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); +bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); +bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +void alloc_bootmem_cpumask_var(cpumask_var_t *mask); +void free_cpumask_var(cpumask_var_t mask); +void free_bootmem_cpumask_var(cpumask_var_t mask); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpumask_available(cpumask_var_t mask) +{ + return mask != ((void *)0); +} +# 805 "./include/linux/cpumask.h" +extern const unsigned long cpu_all_bits[(((8192) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; +# 816 "./include/linux/cpumask.h" +void init_cpu_present(const struct cpumask *src); +void init_cpu_possible(const struct cpumask *src); +void init_cpu_online(const struct cpumask *src); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void reset_cpu_possible_mask(void) +{ + bitmap_zero(((&__cpu_possible_mask)->bits), 8192); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +set_cpu_possible(unsigned int cpu, bool possible) +{ + if (possible) + cpumask_set_cpu(cpu, &__cpu_possible_mask); + else + cpumask_clear_cpu(cpu, &__cpu_possible_mask); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +set_cpu_present(unsigned int cpu, bool present) +{ + if (present) + cpumask_set_cpu(cpu, &__cpu_present_mask); + else + cpumask_clear_cpu(cpu, &__cpu_present_mask); +} + +void set_cpu_online(unsigned int cpu, bool online); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +set_cpu_active(unsigned int cpu, bool active) +{ + if (active) + cpumask_set_cpu(cpu, &__cpu_active_mask); + else + cpumask_clear_cpu(cpu, &__cpu_active_mask); +} +# 869 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __check_is_bitmap(const unsigned long *bitmap) +{ + return 1; +} +# 881 "./include/linux/cpumask.h" +extern const unsigned long + cpu_bit_bitmap[64 +1][(((8192) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cpumask *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % 64]; + p -= cpu / 64; + return ((struct cpumask *)(1 ? 
(p) : (void *)sizeof(__check_is_bitmap(p)))); +} +# 918 "./include/linux/cpumask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ssize_t +cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) +{ + return bitmap_print_to_pagebuf(list, buf, ((mask)->bits), + nr_cpu_ids); +} +# 6 "./arch/x86/include/asm/cpumask.h" 2 + +extern cpumask_var_t cpu_callin_mask; +extern cpumask_var_t cpu_callout_mask; +extern cpumask_var_t cpu_initialized_mask; +extern cpumask_var_t cpu_sibling_setup_mask; + +extern void setup_cpu_local_masks(void); +# 12 "./arch/x86/include/asm/msr.h" 2 +# 1 "./arch/x86/include/uapi/asm/msr.h" 1 +# 13 "./arch/x86/include/asm/msr.h" 2 + +struct msr { + union { + struct { + u32 l; + u32 h; + }; + u64 q; + }; +}; + +struct msr_info { + u32 msr_no; + struct msr reg; + struct msr *msrs; + int err; +}; + +struct msr_regs_info { + u32 *regs; + int err; +}; + +struct saved_msr { + bool valid; + struct msr_info info; +}; + +struct saved_msrs { + unsigned int num; + struct saved_msr *array; +}; +# 68 "./arch/x86/include/asm/msr.h" +# 1 "./include/linux/tracepoint-defs.h" 1 +# 12 "./include/linux/tracepoint-defs.h" +# 1 "./include/linux/static_key.h" 1 +# 13 "./include/linux/tracepoint-defs.h" 2 + +struct trace_print_flags { + unsigned long mask; + const char *name; +}; + +struct trace_print_flags_u64 { + unsigned long long mask; + const char *name; +}; + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint { + const char *name; + struct static_key key; + int (*regfunc)(void); + void (*unregfunc)(void); + struct tracepoint_func *funcs; +}; + + +typedef const int tracepoint_ptr_t; + + + + +struct bpf_raw_event_map { + struct tracepoint *tp; + void *bpf_func; + u32 num_args; + u32 writable_size; +} __attribute__((__aligned__(32))); +# 69 "./arch/x86/include/asm/msr.h" 2 + +extern struct tracepoint __tracepoint_read_msr; +extern struct tracepoint __tracepoint_write_msr; +extern struct tracepoint __tracepoint_rdpmc; + +extern void do_trace_write_msr(unsigned int msr, u64 val, int failed); +extern void do_trace_read_msr(unsigned int msr, u64 val, int failed); +extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed); +# 91 "./arch/x86/include/asm/msr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long __attribute__((no_instrument_function)) __rdmsr(unsigned int msr) +{ + unsigned long low, high; + + asm volatile("1: rdmsr\n" + "2:\n" + " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long (" "ex_handler_rdmsr_unsafe" ") - .\n" " .popsection\n" + : "=a" (low), "=d" (high) : "c" (msr)); + + return ((low) | (high) << 32); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((no_instrument_function)) __wrmsr(unsigned int msr, u32 low, u32 high) +{ + asm volatile("1: wrmsr\n" + "2:\n" + " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "2b" ") - .\n" " .long (" "ex_handler_wrmsr_unsafe" ") - .\n" " .popsection\n" + : : "c" (msr), "a"(low), "d" (high) : "memory"); +} +# 125 "./arch/x86/include/asm/msr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long native_read_msr(unsigned int msr) +{ + unsigned long long val; + + 
val = __rdmsr(msr); + + if (static_key_false(&(__tracepoint_read_msr).key)) + do_trace_read_msr(msr, val, 0); + + return val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long native_read_msr_safe(unsigned int msr, + int *err) +{ + unsigned long low, high; + + asm volatile("2: rdmsr ; xor %[err],%[err]\n" + "1:\n\t" + ".section .fixup,\"ax\"\n\t" + "3: mov %[fault],%[err]\n\t" + "xorl %%eax, %%eax\n\t" + "xorl %%edx, %%edx\n\t" + "jmp 1b\n\t" + ".previous\n\t" + " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "2b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default" ") - .\n" " .popsection\n" + : [err] "=r" (*err), "=a" (low), "=d" (high) + : "c" (msr), [fault] "i" (-5)); + if (static_key_false(&(__tracepoint_read_msr).key)) + do_trace_read_msr(msr, ((low) | (high) << 32), *err); + return ((low) | (high) << 32); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __attribute__((no_instrument_function)) +native_write_msr(unsigned int msr, u32 low, u32 high) +{ + __wrmsr(msr, low, high); + + if (static_key_false(&(__tracepoint_write_msr).key)) + do_trace_write_msr(msr, ((u64)high << 32 | low), 0); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((no_instrument_function)) +native_write_msr_safe(unsigned int msr, u32 low, u32 high) +{ + int err; + + asm volatile("2: wrmsr ; xor %[err],%[err]\n" + "1:\n\t" + ".section .fixup,\"ax\"\n\t" + "3: mov %[fault],%[err] ; jmp 1b\n\t" + ".previous\n\t" + " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "2b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_default" ") - .\n" " .popsection\n" + : [err] "=a" (err) + : "c" (msr), "0" (low), "d" (high), + [fault] "i" (-5) + : "memory"); + if (static_key_false(&(__tracepoint_write_msr).key)) + do_trace_write_msr(msr, ((u64)high << 32 | low), err); + return err; +} + +extern int rdmsr_safe_regs(u32 regs[8]); +extern int wrmsr_safe_regs(u32 regs[8]); +# 201 "./arch/x86/include/asm/msr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long long rdtsc(void) +{ + unsigned long low, high; + + asm volatile("rdtsc" : "=a" (low), "=d" (high)); + + return ((low) | (high) << 32); +} +# 218 "./arch/x86/include/asm/msr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long long rdtsc_ordered(void) +{ + unsigned long low, high; +# 236 "./arch/x86/include/asm/msr.h" + asm volatile("# ALT: oldinstr2\n" "661:\n\t" "rdtsc" "\n662:\n" "# ALT: padding2\n" ".skip -((" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")) > 0) * " "(" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")), 0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 3*32+18)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" " .long 
661b - .\n" " .long " "664""2""f - .\n" " .word " "( 1*32+27)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""2""f-""664""2""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "lfence; rdtsc" "\n" "665""1" ":\n" "# ALT: replacement " "2" "\n" "664""2"":\n\t" "rdtscp" "\n" "665""2" ":\n" ".popsection\n" + + + : "=a" (low), "=d" (high) + + :: "ecx"); + + return ((low) | (high) << 32); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long native_read_pmc(int counter) +{ + unsigned long low, high; + + asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter)); + if (static_key_false(&(__tracepoint_rdpmc).key)) + do_trace_rdpmc(counter, ((low) | (high) << 32), 0); + return ((low) | (high) << 32); +} + + +# 1 "./arch/x86/include/asm/paravirt.h" 1 +# 18 "./arch/x86/include/asm/paravirt.h" +# 1 "./arch/x86/include/asm/frame.h" 1 +# 19 "./arch/x86/include/asm/paravirt.h" 2 + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long paravirt_sched_clock(void) +{ + return ({ unsigned long long __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.time.sched_clock == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (31)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (22), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (32)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(unsigned long long) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, time.sched_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.time.sched_clock)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (unsigned long long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" 
((__builtin_offsetof(struct paravirt_patch_template, time.sched_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.time.sched_clock)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (unsigned long long)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(unsigned long long)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +__attribute__((__externally_visible__)) void __native_queued_spin_unlock(struct qspinlock *lock); +bool pv_is_native_spin_unlock(void); +__attribute__((__externally_visible__)) bool __native_vcpu_is_preempted(long cpu); +bool pv_is_native_vcpu_is_preempted(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 paravirt_steal_clock(int cpu) +{ + return ({ u64 __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.time.steal_clock == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (33)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (36), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (34)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(u64) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, time.steal_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.time.steal_clock)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(cpu)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, time.steal_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.time.steal_clock)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(cpu)) : 
"memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (u64)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(u64)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void slow_down_io(void) +{ + pv_ops.cpu.io_delay(); + + + + + +} + +void native_flush_tlb_local(void); +void native_flush_tlb_global(void); +void native_flush_tlb_one_user(unsigned long addr); +void native_flush_tlb_others(const struct cpumask *cpumask, + const struct flush_tlb_info *info); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __flush_tlb_local(void) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.flush_tlb_user == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (35)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (58), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (36)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.flush_tlb_user) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.flush_tlb_user)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __flush_tlb_global(void) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.flush_tlb_kernel == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (37)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (63), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (38)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while 
(0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.flush_tlb_kernel) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.flush_tlb_kernel)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __flush_tlb_one_user(unsigned long addr) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.flush_tlb_one_user == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (39)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (68), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (40)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.flush_tlb_one_user) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.flush_tlb_one_user)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(addr)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __flush_tlb_others(const struct cpumask *cpumask, + const struct flush_tlb_info *info) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.flush_tlb_others == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (41)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (74), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); 
do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (42)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.flush_tlb_others) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.flush_tlb_others)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(cpumask)), "S" ((unsigned long)(info)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.tlb_remove_table == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (43)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (79), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (44)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.tlb_remove_table) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.tlb_remove_table)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(tlb)), "S" ((unsigned long)(table)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void paravirt_arch_exit_mmap(struct mm_struct *mm) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.exit_mmap == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (45)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " 
"%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (84), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (46)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.exit_mmap) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.exit_mmap)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(mm)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void load_sp0(unsigned long sp0) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.cpu.load_sp0 == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (47)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (90), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (48)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.load_sp0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.load_sp0)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(sp0)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __cpuid(unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.cpu.cpuid == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (49)); }); 
do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (97), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (50)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.cpuid) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.cpuid)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(eax)), "S" ((unsigned long)(ebx)), "d" ((unsigned long)(ecx)), "c" ((unsigned long)(edx)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long paravirt_get_debugreg(int reg) +{ + return ({ unsigned long __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.cpu.get_debugreg == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (51)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (105), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (52)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(unsigned long) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.get_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.get_debugreg)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(reg)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 
999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.get_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.get_debugreg)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(reg)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (unsigned long)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(unsigned long)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_debugreg(unsigned long val, int reg) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.cpu.set_debugreg == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (53)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (110), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (54)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.set_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.set_debugreg)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(reg)), "S" ((unsigned long)(val)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long read_cr0(void) +{ + return ({ unsigned long __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.cpu.read_cr0 == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (55)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" 
".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (115), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (56)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(unsigned long) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.read_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.read_cr0)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.read_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.read_cr0)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (unsigned long)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(unsigned long)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_cr0(unsigned long x) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.cpu.write_cr0 == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (57)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (120), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (58)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " 
"%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.write_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.write_cr0)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(x)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long read_cr2(void) +{ + return ({ unsigned long __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.read_cr2.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (59)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (125), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (60)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(unsigned long) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.read_cr2.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.read_cr2.func)), [paravirt_clobber] "i" (((1 << 0))) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.read_cr2.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.read_cr2.func)), [paravirt_clobber] "i" (((1 << 0))) : "memory", "cc" ); __ret = (unsigned long)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(unsigned long)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_cr2(unsigned long x) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.write_cr2 == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection 
[... preprocessed kernel test input, continued: the macro-expanded paravirt inline wrappers of arch/x86/include/asm/paravirt.h (header lines ~130-363) — write_cr2, __read_cr3, write_cr3, __write_cr4, arch_safe_halt, halt, wbinvd; the MSR/PMC helpers paravirt_read_msr, paravirt_write_msr, paravirt_read_msr_safe, paravirt_write_msr_safe, wrmsrl, rdmsrl_safe, paravirt_read_pmc; the descriptor-table and segment helpers paravirt_alloc_ldt, paravirt_free_ldt, load_TR_desc, load_gdt, load_idt, set_ldt, paravirt_store_tr, load_TLS, load_gs_index, write_ldt_entry, write_gdt_entry, write_idt_entry, tss_update_io_bitmap; and the mm hooks paravirt_activate_mm, paravirt_arch_dup_mmap, paravirt_pgd_alloc, paravirt_pgd_free, and the paravirt_alloc_*/paravirt_release_* pairs for pte, pmd, pud and p4d. Each body expands to the same pattern: a null-check on the pv_ops slot that traps via a __bug_table entry, followed by a retpoline-safe indirect PVOP call through pv_ops ...]
"\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.alloc_p4d) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.alloc_p4d)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(mm)), "S" ((unsigned long)(pfn)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void paravirt_release_p4d(unsigned long pfn) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.release_p4d == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (133)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (368), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (134)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.release_p4d) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.release_p4d)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(pfn)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t __pte(pteval_t val) +{ + pteval_t ret; + + if (sizeof(pteval_t) > sizeof(long)) + ret = ({ pteval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.make_pte.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (135)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (376), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (136)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); 
if (sizeof(pteval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pte.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)), "S" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pte.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)), "S" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pteval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pteval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + else + ret = ({ pteval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.make_pte.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (137)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (378), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (138)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pteval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pte.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" 
".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pte.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pteval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pteval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + + return (pte_t) { .pte = ret }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pteval_t pte_val(pte_t pte) +{ + pteval_t ret; + + if (sizeof(pteval_t) > sizeof(long)) + ret = ({ pteval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.pte_val.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (139)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (388), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (140)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pteval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pte_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pte.pte)), "S" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pte_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pte.pte)), 
"S" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); __ret = (pteval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pteval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }) + ; + else + ret = ({ pteval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.pte_val.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (141)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (391), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (142)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pteval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pte_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pte.pte)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pte_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pte.pte)) : "memory", "cc" ); __ret = (pteval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pteval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t __pgd(pgdval_t val) +{ + pgdval_t ret; + + if (sizeof(pgdval_t) > sizeof(long)) + ret = ({ pgdval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.make_pgd.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (143)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" 
".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (401), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (144)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pgdval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)), "S" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)), "S" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pgdval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pgdval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + else + ret = ({ pgdval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.make_pgd.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (145)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (403), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (146)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pgdval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection 
.parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pgdval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pgdval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + + return (pgd_t) { ret }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgdval_t pgd_val(pgd_t pgd) +{ + pgdval_t ret; + + if (sizeof(pgdval_t) > sizeof(long)) + ret = ({ pgdval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.pgd_val.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (147)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (413), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (148)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pgdval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pgd.pgd)), "S" ((unsigned long)((u64)pgd.pgd >> 32)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection 
.discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pgd.pgd)), "S" ((unsigned long)((u64)pgd.pgd >> 32)) : "memory", "cc" ); __ret = (pgdval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pgdval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }) + ; + else + ret = ({ pgdval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.pgd_val.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (149)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (416), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (150)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pgdval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pgd.pgd)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pgd.pgd)) : "memory", "cc" ); __ret = (pgdval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pgdval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } 
__ret; }); + + return ret; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pteval_t ret; + + ret = ({ pteval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.ptep_modify_prot_start == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (151)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (427), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (152)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pteval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.ptep_modify_prot_start) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.ptep_modify_prot_start)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(vma)), "S" ((unsigned long)(addr)), "d" ((unsigned long)(ptep)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.ptep_modify_prot_start) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.ptep_modify_prot_start)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(vma)), "S" ((unsigned long)(addr)), "d" ((unsigned long)(ptep)) : "memory", "cc" , "r8", "r9", "r10", "r11"); __ret = (pteval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pteval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + + return (pte_t) { .pte = ret }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t old_pte, pte_t pte) +{ + + if (sizeof(pteval_t) > 
sizeof(long)) + + pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte); + else + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.ptep_modify_prot_commit == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (153)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (440), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (154)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.ptep_modify_prot_commit) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.ptep_modify_prot_commit)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(vma)), "S" ((unsigned long)(addr)), "d" ((unsigned long)(ptep)), "c" ((unsigned long)(pte.pte)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pte(pte_t *ptep, pte_t pte) +{ + if (sizeof(pteval_t) > sizeof(long)) + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_pte == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (155)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (447), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (156)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct 
paravirt_patch_template, mmu.set_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_pte)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(ptep)), "S" ((unsigned long)(pte.pte)), "d" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); + else + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_pte == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (157)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (449), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (158)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.set_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_pte)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(ptep)), "S" ((unsigned long)(pte.pte)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + if (sizeof(pteval_t) > sizeof(long)) + + pv_ops.mmu.set_pte_at(mm, addr, ptep, pte); + else + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_pte_at == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (159)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (459), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (160)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 
772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.set_pte_at) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_pte_at)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(mm)), "S" ((unsigned long)(addr)), "d" ((unsigned long)(ptep)), "c" ((unsigned long)(pte.pte)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + pmdval_t val = native_pmd_val(pmd); + + if (sizeof(pmdval_t) > sizeof(long)) + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_pmd == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (161)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (467), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (162)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.set_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_pmd)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(pmdp)), "S" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); + else + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_pmd == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (163)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (469), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (164)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" 
".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.set_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_pmd)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(pmdp)), "S" ((unsigned long)(val)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t __pmd(pmdval_t val) +{ + pmdval_t ret; + + if (sizeof(pmdval_t) > sizeof(long)) + ret = ({ pmdval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.make_pmd.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (165)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (478), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (166)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pmdval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)), "S" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)), "S" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pmdval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pmdval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; 
default: break; } __mask; })); } __ret; }); + else + ret = ({ pmdval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.make_pmd.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (167)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (480), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (168)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pmdval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pmdval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pmdval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + + return (pmd_t) { ret }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmdval_t pmd_val(pmd_t pmd) +{ + pmdval_t ret; + + if (sizeof(pmdval_t) > sizeof(long)) + ret = ({ pmdval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.pmd_val.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (169)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" 
".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (490), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (170)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pmdval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pmd.pmd)), "S" ((unsigned long)((u64)pmd.pmd >> 32)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pmd.pmd)), "S" ((unsigned long)((u64)pmd.pmd >> 32)) : "memory", "cc" ); __ret = (pmdval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pmdval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }) + ; + else + ret = ({ pmdval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.pmd_val.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (171)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (493), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (172)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pmdval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : 
[paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pmd.pmd)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pmd.pmd)) : "memory", "cc" ); __ret = (pmdval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pmdval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pud(pud_t *pudp, pud_t pud) +{ + pudval_t val = native_pud_val(pud); + + if (sizeof(pudval_t) > sizeof(long)) + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_pud == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (173)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (503), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (174)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.set_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_pud)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(pudp)), "S" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); + else + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_pud == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (175)); }); do { asm volatile("1:\t" ".byte 
0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (505), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (176)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.set_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_pud)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(pudp)), "S" ((unsigned long)(val)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t __pud(pudval_t val) +{ + pudval_t ret; + + ret = ({ pudval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.make_pud.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (177)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (512), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (178)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pudval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pud.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pud.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pudval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " 
"%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_pud.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_pud.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pudval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pudval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + + return (pud_t) { ret }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pudval_t pud_val(pud_t pud) +{ + return ({ pudval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.pud_val.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (179)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (519), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (180)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(pudval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pud_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pud_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pud.pud)) : "memory", "cc" ); __ret = (pudval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.pud_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.pud_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(pud.pud)) : "memory", "cc" ); __ret = (pudval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(pudval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void pud_clear(pud_t *pudp) +{ + set_pud(pudp, __pud(0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + p4dval_t val = native_p4d_val(p4d); + + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_p4d == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (181)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (531), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (182)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.set_p4d) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_p4d)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(p4dp)), "S" ((unsigned long)(val)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t __p4d(p4dval_t val) +{ + p4dval_t ret = ({ p4dval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.make_p4d.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (183)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (538), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (184)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(p4dval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" 
(current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_p4d.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_p4d.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (p4dval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.make_p4d.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.make_p4d.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(val)) : "memory", "cc" ); __ret = (p4dval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(p4dval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); + + return (p4d_t) { ret }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4dval_t p4d_val(p4d_t p4d) +{ + return ({ p4dval_t __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.p4d_val.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (185)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (545), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (186)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(p4dval_t) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.p4d_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.p4d_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(p4d.p4d)) : "memory", "cc" ); __ret = (p4dval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), 
"+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.p4d_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.p4d_val.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(p4d.p4d)) : "memory", "cc" ); __ret = (p4dval_t)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(p4dval_t)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __set_pgd(pgd_t *pgdp, pgd_t pgd) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.set_pgd == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (187)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (550), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (188)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.set_pgd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.set_pgd)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(pgdp)), "S" ((unsigned long)(native_pgd_val(pgd))) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} +# 567 "./arch/x86/include/asm/paravirt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void p4d_clear(p4d_t *p4dp) +{ + set_p4d(p4dp, __p4d(0)); +} +# 595 "./arch/x86/include/asm/paravirt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pte_atomic(pte_t *ptep, pte_t pte) +{ + set_pte(ptep, pte); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + set_pte_at(mm, addr, ptep, __pte(0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pmd_clear(pmd_t *pmdp) +{ + set_pmd(pmdp, __pmd(0)); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_start_context_switch(struct task_struct *prev) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if 
(__builtin_expect(!!(pv_ops.cpu.start_context_switch == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (189)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (615), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (190)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.start_context_switch) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.start_context_switch)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(prev)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_end_context_switch(struct task_struct *next) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.cpu.end_context_switch == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (191)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (620), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (192)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, cpu.end_context_switch) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.cpu.end_context_switch)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(next)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_enter_lazy_mmu_mode(void) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.lazy_mode.enter == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (193)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (626), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (194)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.lazy_mode.enter) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.lazy_mode.enter)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_leave_lazy_mmu_mode(void) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.lazy_mode.leave == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (195)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (631), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (196)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.lazy_mode.leave) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.lazy_mode.leave)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", 
"cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_flush_lazy_mmu_mode(void) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.mmu.lazy_mode.flush == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (197)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (636), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (198)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, mmu.lazy_mode.flush) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.mmu.lazy_mode.flush)), [paravirt_clobber] "i" (((1 << 9) - 1)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __set_fixmap(unsigned idx, + phys_addr_t phys, pgprot_t flags) +{ + pv_ops.mmu.set_fixmap(idx, phys, flags); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void pv_queued_spin_lock_slowpath(struct qspinlock *lock, + u32 val) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.lock.queued_spin_lock_slowpath == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (199)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (651), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (200)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " 
"%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, lock.queued_spin_lock_slowpath) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.lock.queued_spin_lock_slowpath)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(lock)), "S" ((unsigned long)(val)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void pv_queued_spin_unlock(struct qspinlock *lock) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.lock.queued_spin_unlock.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (201)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (656), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (202)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, lock.queued_spin_unlock.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.lock.queued_spin_unlock.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(lock)) : "memory", "cc" ); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void pv_wait(u8 *ptr, u8 val) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.lock.wait == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (203)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (661), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (204)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection 
.discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, lock.wait) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.lock.wait)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(ptr)), "S" ((unsigned long)(val)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void pv_kick(int cpu) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.lock.kick == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (205)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (666), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (206)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=D" (__edi), "=S" (__esi), "=d" (__edx), "=c" (__ecx), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, lock.kick) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.lock.kick)), [paravirt_clobber] "i" (((1 << 9) - 1)), "D" ((unsigned long)(cpu)) : "memory", "cc" , "rax", "r8", "r9", "r10", "r11"); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool pv_vcpu_is_preempted(long cpu) +{ + return ({ bool __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.lock.vcpu_is_preempted.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (207)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (671), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection 
.discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (208)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(bool) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, lock.vcpu_is_preempted.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.lock.vcpu_is_preempted.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(cpu)) : "memory", "cc" ); __ret = (bool)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, lock.vcpu_is_preempted.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.lock.vcpu_is_preempted.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(cpu)) : "memory", "cc" ); __ret = (bool)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(bool)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + +void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock); +bool __raw_callee_save___native_vcpu_is_preempted(long cpu); +# 758 "./arch/x86/include/asm/paravirt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) unsigned long arch_local_save_flags(void) +{ + return ({ unsigned long __ret; unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.irq.save_fl.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (209)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (760), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (210)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); if (sizeof(unsigned long) > sizeof(unsigned long)) { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " 
"%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, irq.save_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.irq.save_fl.func)), [paravirt_clobber] "i" (((1 << 0))) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, irq.save_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.irq.save_fl.func)), [paravirt_clobber] "i" (((1 << 0))) : "memory", "cc" ); __ret = (unsigned long)(__eax & ({ unsigned long __mask = ~0UL; switch (sizeof(unsigned long)) { case 1: __mask = 0xffUL; break; case 2: __mask = 0xffffUL; break; case 4: __mask = 0xffffffffUL; break; default: break; } __mask; })); } __ret; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void arch_local_irq_restore(unsigned long f) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.irq.restore_fl.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (211)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (765), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (212)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, irq.restore_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.irq.restore_fl.func)), [paravirt_clobber] "i" (((1 << 0))), "D" ((unsigned long)(f)) : "memory", "cc" ); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void arch_local_irq_disable(void) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.irq.irq_disable.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" 
".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (213)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (770), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (214)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, irq.irq_disable.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.irq.irq_disable.func)), [paravirt_clobber] "i" (((1 << 0))) : "memory", "cc" ); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) void arch_local_irq_enable(void) +{ + ({ unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx, __eax = __eax;; do { if (__builtin_expect(!!(pv_ops.irq.irq_enable.func == ((void *)0)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (215)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/paravirt.h"), "i" (775), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (216)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); asm volatile("" "771:\n\t" "999:\n\t" ".pushsection .discard.retpoline_safe\n\t" " " ".quad" " " " 999b\n\t" ".popsection\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 8" " " "\n" " " ".quad" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "+r" (current_stack_pointer) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template, irq.irq_enable.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_ops.irq.irq_enable.func)), [paravirt_clobber] "i" (((1 << 0))) : "memory", "cc" ); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) unsigned long arch_local_irq_save(void) +{ + unsigned long f; + + f = arch_local_save_flags(); + arch_local_irq_disable(); + return f; +} +# 804 "./arch/x86/include/asm/paravirt.h" +extern void default_banner(void); +# 258 
"./arch/x86/include/asm/msr.h" 2 +# 324 "./arch/x86/include/asm/msr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int wrmsrl_safe(u32 msr, u64 val) +{ + return paravirt_write_msr_safe(msr, (u32)val, (u32)(val >> 32)); +} + + + + + +struct msr *msrs_alloc(void); +void msrs_free(struct msr *msrs); +int msr_set_bit(u32 msr, u8 bit); +int msr_clear_bit(u32 msr, u8 bit); + + +int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); +int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); +int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); +int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q); +void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); +void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); +int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); +int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); +int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); +int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q); +int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); +int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); +# 23 "./arch/x86/include/asm/processor.h" 2 + + +# 1 "./arch/x86/include/asm/special_insns.h" 1 +# 10 "./arch/x86/include/asm/special_insns.h" +# 1 "./include/linux/irqflags.h" 1 +# 16 "./include/linux/irqflags.h" +# 1 "./arch/x86/include/asm/irqflags.h" 1 +# 19 "./arch/x86/include/asm/irqflags.h" +extern inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_save_fl(void); +extern inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long native_save_fl(void) +{ + unsigned long flags; + + + + + + + asm volatile("# __raw_save_flags\n\t" + "pushf ; pop %0" + : "=rm" (flags) + : + : "memory"); + + return flags; +} + +extern inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_restore_fl(unsigned long flags); +extern inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_restore_fl(unsigned long flags) +{ + asm volatile("push %0 ; popf" + : + :"g" (flags) + :"memory", "cc"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void native_irq_disable(void) +{ + asm volatile("cli": : :"memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void native_irq_enable(void) +{ + asm volatile("sti": : :"memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__section__(".cpuidle.text"))) void native_safe_halt(void) +{ + mds_idle_clear_cpu_buffers(); + asm volatile("sti; hlt": : :"memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__section__(".cpuidle.text"))) void native_halt(void) +{ + mds_idle_clear_cpu_buffers(); + asm volatile("hlt": : :"memory"); +} +# 162 "./arch/x86/include/asm/irqflags.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
__attribute__((__always_inline__)) int arch_irqs_disabled_flags(unsigned long flags) +{ + return !(flags & (((1UL)) << (9))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int arch_irqs_disabled(void) +{ + unsigned long flags = arch_local_save_flags(); + + return arch_irqs_disabled_flags(flags); +} +# 17 "./include/linux/irqflags.h" 2 + + + + extern void lockdep_softirqs_on(unsigned long ip); + extern void lockdep_softirqs_off(unsigned long ip); + extern void lockdep_hardirqs_on_prepare(unsigned long ip); + extern void lockdep_hardirqs_on(unsigned long ip); + extern void lockdep_hardirqs_off(unsigned long ip); +# 34 "./include/linux/irqflags.h" + extern void trace_hardirqs_on_prepare(void); + extern void trace_hardirqs_off_finish(void); + extern void trace_hardirqs_on(void); + extern void trace_hardirqs_off(void); +# 126 "./include/linux/irqflags.h" + extern void stop_critical_timings(void); + extern void start_critical_timings(void); +# 11 "./arch/x86/include/asm/special_insns.h" 2 +# 20 "./arch/x86/include/asm/special_insns.h" +extern unsigned long __force_order; + +void native_write_cr0(unsigned long val); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_read_cr0(void) +{ + unsigned long val; + asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); + return val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long native_read_cr2(void) +{ + unsigned long val; + asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); + return val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void native_write_cr2(unsigned long val) +{ + asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __native_read_cr3(void) +{ + unsigned long val; + asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); + return val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_write_cr3(unsigned long val) +{ + asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_read_cr4(void) +{ + unsigned long val; +# 70 "./arch/x86/include/asm/special_insns.h" + asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); + + return val; +} + +void native_write_cr4(unsigned long val); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 rdpkru(void) +{ + u32 ecx = 0; + u32 edx, pkru; + + + + + + asm volatile(".byte 0x0f,0x01,0xee\n\t" + : "=a" (pkru), "=d" (edx) + : "c" (ecx)); + return pkru; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wrpkru(u32 pkru) +{ + u32 ecx = 0, edx = 0; + + + + + + asm volatile(".byte 0x0f,0x01,0xef\n\t" + : : "a" (pkru), "c"(ecx), "d"(edx)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void __write_pkru(u32 pkru) +{ + + + + + if (pkru == rdpkru()) + return; + + wrpkru(pkru); +} +# 128 "./arch/x86/include/asm/special_insns.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_wbinvd(void) +{ + asm volatile("wbinvd": : :"memory"); +} + +extern void asm_load_gs_index(unsigned int selector); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_load_gs_index(unsigned int selector) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + asm_load_gs_index(selector); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __read_cr4(void) +{ + return native_read_cr4(); +} +# 208 "./arch/x86/include/asm/special_insns.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clflush(volatile void *__p) +{ + asm volatile("clflush %0" : "+m" (*(volatile char *)__p)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clflushopt(volatile void *__p) +{ + asm volatile ("# ALT: oldnstr\n" "661:\n\t" ".byte " "0x3e" "; clflush %P0" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+23)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" ".byte 0x66; clflush %P0" "\n" "665""1" ":\n" ".popsection\n" : "+m" (*(volatile char *)__p) : "i" (0)) + + + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clwb(volatile void *__p) +{ + volatile struct { char x[64]; } *p = __p; + + asm volatile("# ALT: oldinstr2\n" "661:\n\t" ".byte " "0x3e" "; clflush (%[pax])" "\n662:\n" "# ALT: padding2\n" ".skip -((" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")) > 0) * " "(" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")), 0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+23)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" 
"\n" " .long 661b - .\n" " .long " "664""2""f - .\n" " .word " "( 9*32+24)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""2""f-""664""2""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" ".byte 0x66; clflush (%[pax])" "\n" "665""1" ":\n" "# ALT: replacement " "2" "\n" "664""2"":\n\t" ".byte 0x66, 0x0f, 0xae, 0x30" "\n" "665""2" ":\n" ".popsection\n" + + + + + + : [p] "+m" (*p) + : [pax] "a" (p)); +} +# 26 "./arch/x86/include/asm/processor.h" 2 +# 1 "./arch/x86/include/asm/fpu/types.h" 1 +# 12 "./arch/x86/include/asm/fpu/types.h" +struct fregs_state { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + + + u32 st_space[20]; + + + u32 status; +}; + + + + + + + +struct fxregs_state { + u16 cwd; + u16 swd; + u16 twd; + u16 fop; + union { + struct { + u64 rip; + u64 rdp; + }; + struct { + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + }; + }; + u32 mxcsr; + u32 mxcsr_mask; + + + u32 st_space[32]; + + + u32 xmm_space[64]; + + u32 padding[12]; + + union { + u32 padding1[12]; + u32 sw_reserved[12]; + }; + +} __attribute__((aligned(16))); +# 79 "./arch/x86/include/asm/fpu/types.h" +struct swregs_state { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + + u32 st_space[20]; + u8 ftop; + u8 changed; + u8 lookahead; + u8 no_update; + u8 rm; + u8 alimit; + struct math_emu_info *info; + u32 entry_eip; +}; + + + + +enum xfeature { + XFEATURE_FP, + XFEATURE_SSE, + + + + + XFEATURE_YMM, + XFEATURE_BNDREGS, + XFEATURE_BNDCSR, + XFEATURE_OPMASK, + XFEATURE_ZMM_Hi256, + XFEATURE_Hi16_ZMM, + XFEATURE_PT_UNIMPLEMENTED_SO_FAR, + XFEATURE_PKRU, + + XFEATURE_MAX, +}; +# 139 "./arch/x86/include/asm/fpu/types.h" +struct reg_128_bit { + u8 regbytes[128/8]; +}; +struct reg_256_bit { + u8 regbytes[256/8]; +}; +struct reg_512_bit { + u8 regbytes[512/8]; +}; +# 159 "./arch/x86/include/asm/fpu/types.h" +struct ymmh_struct { + struct reg_128_bit hi_ymm[16]; +} __attribute__((__packed__)); + + + +struct mpx_bndreg { + u64 lower_bound; + u64 upper_bound; +} __attribute__((__packed__)); + + + +struct mpx_bndreg_state { + struct mpx_bndreg bndreg[4]; +} __attribute__((__packed__)); + + + + + + +struct mpx_bndcsr { + u64 bndcfgu; + u64 bndstatus; +} __attribute__((__packed__)); + + + + +struct mpx_bndcsr_state { + union { + struct mpx_bndcsr bndcsr; + u8 pad_to_64_bytes[64]; + }; +} __attribute__((__packed__)); + + + + + + + +struct avx_512_opmask_state { + u64 opmask_reg[8]; +} __attribute__((__packed__)); + + + + + + +struct avx_512_zmm_uppers_state { + struct reg_256_bit zmm_upper[16]; +} __attribute__((__packed__)); + + + + + +struct avx_512_hi16_state { + struct reg_512_bit hi16_zmm[16]; +} __attribute__((__packed__)); + + + + + +struct pkru_state { + u32 pkru; + u32 pad; +} __attribute__((__packed__)); + +struct xstate_header { + u64 xfeatures; + u64 xcomp_bv; + u64 reserved[6]; +} __attribute__((packed)); +# 253 "./arch/x86/include/asm/fpu/types.h" +struct xregs_state { + struct fxregs_state i387; + struct xstate_header header; + u8 extended_state_area[0]; +} __attribute__ ((packed, aligned (64))); +# 268 "./arch/x86/include/asm/fpu/types.h" +union fpregs_state { + struct fregs_state fsave; + struct fxregs_state fxsave; + struct swregs_state soft; + struct xregs_state xsave; + u8 __padding[((1UL) << 12)]; +}; + + + + + + +struct fpu { +# 294 "./arch/x86/include/asm/fpu/types.h" + unsigned int last_cpu; + + + + + + + unsigned long avx512_timestamp; +# 312 
"./arch/x86/include/asm/fpu/types.h" + union fpregs_state state; + + + + +}; +# 27 "./arch/x86/include/asm/processor.h" 2 + +# 1 "./arch/x86/include/asm/vmxfeatures.h" 1 +# 29 "./arch/x86/include/asm/processor.h" 2 +# 1 "./arch/x86/include/asm/vdso/processor.h" 1 +# 11 "./arch/x86/include/asm/vdso/processor.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void rep_nop(void) +{ + asm volatile("rep; nop" ::: "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void cpu_relax(void) +{ + rep_nop(); +} +# 30 "./arch/x86/include/asm/processor.h" 2 + +# 1 "./include/linux/personality.h" 1 + + + + +# 1 "./include/uapi/linux/personality.h" 1 +# 11 "./include/uapi/linux/personality.h" +enum { + UNAME26 = 0x0020000, + ADDR_NO_RANDOMIZE = 0x0040000, + FDPIC_FUNCPTRS = 0x0080000, + + + MMAP_PAGE_ZERO = 0x0100000, + ADDR_COMPAT_LAYOUT = 0x0200000, + READ_IMPLIES_EXEC = 0x0400000, + ADDR_LIMIT_32BIT = 0x0800000, + SHORT_INODE = 0x1000000, + WHOLE_SECONDS = 0x2000000, + STICKY_TIMEOUTS = 0x4000000, + ADDR_LIMIT_3GB = 0x8000000, +}; +# 42 "./include/uapi/linux/personality.h" +enum { + PER_LINUX = 0x0000, + PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, + PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, + PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, + PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, + PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | + WHOLE_SECONDS | SHORT_INODE, + PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, + PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, + PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, + PER_BSD = 0x0006, + PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, + PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, + PER_LINUX32 = 0x0008, + PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, + PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS, + PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS, + PER_IRIX64 = 0x000b | STICKY_TIMEOUTS, + PER_RISCOS = 0x000c, + PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, + PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, + PER_OSF4 = 0x000f, + PER_HPUX = 0x0010, + PER_MASK = 0x00ff, +}; +# 6 "./include/linux/personality.h" 2 +# 32 "./arch/x86/include/asm/processor.h" 2 + + + +# 1 "./include/linux/err.h" 1 + + + + + + + +# 1 "./arch/x86/include/generated/uapi/asm/errno.h" 1 +# 9 "./include/linux/err.h" 2 +# 24 "./include/linux/err.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void * __attribute__((__warn_unused_result__)) ERR_PTR(long error) +{ + return (void *) error; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long __attribute__((__warn_unused_result__)) PTR_ERR( const void *ptr) +{ + return (long) ptr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) IS_ERR( const void *ptr) +{ + return __builtin_expect(!!((unsigned long)(void *)((unsigned long)ptr) >= (unsigned long)-4095), 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) IS_ERR_OR_NULL( const void *ptr) +{ + return __builtin_expect(!!(!ptr), 0) || __builtin_expect(!!((unsigned long)(void *)((unsigned long)ptr) >= (unsigned long)-4095), 0); +} 
+# 51 "./include/linux/err.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void * __attribute__((__warn_unused_result__)) ERR_CAST( const void *ptr) +{ + + return (void *) ptr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) PTR_ERR_OR_ZERO( const void *ptr) +{ + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + else + return 0; +} +# 36 "./arch/x86/include/asm/processor.h" 2 +# 62 "./arch/x86/include/asm/processor.h" +enum tlb_infos { + ENTRIES, + NR_INFO +}; + +extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lli_4k[NR_INFO]; +extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lli_2m[NR_INFO]; +extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lli_4m[NR_INFO]; +extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lld_4k[NR_INFO]; +extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lld_2m[NR_INFO]; +extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lld_4m[NR_INFO]; +extern u16 __attribute__((__section__(".data..read_mostly"))) tlb_lld_1g[NR_INFO]; + + + + + + + +struct cpuinfo_x86 { + __u8 x86; + __u8 x86_vendor; + __u8 x86_model; + __u8 x86_stepping; + + + int x86_tlbsize; + + + __u32 vmx_capability[3]; + + __u8 x86_virt_bits; + __u8 x86_phys_bits; + + __u8 x86_coreid_bits; + __u8 cu_id; + + __u32 extended_cpuid_level; + + int cpuid_level; + + + + + + union { + __u32 x86_capability[19 + 1]; + unsigned long x86_capability_alignment; + }; + char x86_vendor_id[16]; + char x86_model_id[64]; + + unsigned int x86_cache_size; + int x86_cache_alignment; + + int x86_cache_max_rmid; + int x86_cache_occ_scale; + int x86_cache_mbm_width_offset; + int x86_power; + unsigned long loops_per_jiffy; + + u16 x86_max_cores; + u16 apicid; + u16 initial_apicid; + u16 x86_clflush_size; + + u16 booted_cores; + + u16 phys_proc_id; + + u16 logical_proc_id; + + u16 cpu_core_id; + u16 cpu_die_id; + u16 logical_die_id; + + u16 cpu_index; + u32 microcode; + + u8 x86_cache_bits; + unsigned initialized : 1; +} __attribute__((__designated_init__)); + +struct cpuid_regs { + u32 eax, ebx, ecx, edx; +}; + +enum cpuid_regs_idx { + CPUID_EAX = 0, + CPUID_EBX, + CPUID_ECX, + CPUID_EDX, +}; +# 172 "./arch/x86/include/asm/processor.h" +extern struct cpuinfo_x86 boot_cpu_data; +extern struct cpuinfo_x86 new_cpu_data; + +extern __u32 cpu_caps_cleared[19 + 1]; +extern __u32 cpu_caps_set[19 + 1]; + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_info; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(struct cpuinfo_x86) cpu_info; + + + + + + +extern const struct seq_operations cpuinfo_op; + + + +extern void cpu_detect(struct cpuinfo_x86 *c); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long long l1tf_pfn_limit(void) +{ + return ((((1ULL))) << (boot_cpu_data.x86_cache_bits - 1 - 12)); +} + +extern void early_cpu_init(void); +extern void identify_boot_cpu(void); +extern void identify_secondary_cpu(struct cpuinfo_x86 *); +extern void print_cpu_info(struct cpuinfo_x86 *); +void print_cpu_msr(struct cpuinfo_x86 *); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int have_cpuid_p(void) +{ + return 1; +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void native_cpuid(unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + + asm volatile("cpuid" + : "=a" (*eax), + "=b" (*ebx), + "=c" (*ecx), + "=d" (*edx) + : "0" (*eax), "2" (*ecx) + : "memory"); +} +# 237 "./arch/x86/include/asm/processor.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int native_cpuid_eax(unsigned int op) { unsigned int eax = op, ebx, ecx = 0, edx; native_cpuid(&eax, &ebx, &ecx, &edx); return eax; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int native_cpuid_ebx(unsigned int op) { unsigned int eax = op, ebx, ecx = 0, edx; native_cpuid(&eax, &ebx, &ecx, &edx); return ebx; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int native_cpuid_ecx(unsigned int op) { unsigned int eax = op, ebx, ecx = 0, edx; native_cpuid(&eax, &ebx, &ecx, &edx); return ecx; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int native_cpuid_edx(unsigned int op) { unsigned int eax = op, ebx, ecx = 0, edx; native_cpuid(&eax, &ebx, &ecx, &edx); return edx; } + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long read_cr3_pa(void) +{ + return __read_cr3() & ((0x7FFFFFFFFFFFF000ull) & ~sme_me_mask); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long native_read_cr3_pa(void) +{ + return __native_read_cr3() & ((0x7FFFFFFFFFFFF000ull) & ~sme_me_mask); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void load_cr3(pgd_t *pgdir) +{ + write_cr3((__phys_addr((unsigned long)(pgdir)) | sme_me_mask)); +} +# 314 "./arch/x86/include/asm/processor.h" +struct x86_hw_tss { + u32 reserved1; + u64 sp0; + + + + + + u64 sp1; + + + + + + + u64 sp2; + + u64 reserved2; + u64 ist[7]; + u32 reserved3; + u32 reserved4; + u16 reserved5; + u16 io_bitmap_base; + +} __attribute__((packed)); +# 372 "./arch/x86/include/asm/processor.h" +struct entry_stack { + unsigned long words[64]; +}; + +struct entry_stack_page { + struct entry_stack stack; +} __attribute__((__aligned__(((1UL) << 12)))); + + + + +struct x86_io_bitmap { + + u64 prev_sequence; +# 394 "./arch/x86/include/asm/processor.h" + unsigned int prev_max; + + + + + + + + unsigned long bitmap[((65536 / 8) / sizeof(long)) + 1]; + + + + + + unsigned long mapall[((65536 / 8) / sizeof(long)) + 1]; +}; + +struct tss_struct { + + + + + + struct x86_hw_tss x86_tss; + + struct x86_io_bitmap io_bitmap; +} __attribute__((__aligned__(((1UL) << 12)))); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_tss_rw; extern __attribute__((section(".data..percpu" "..page_aligned"))) __typeof__(struct tss_struct) cpu_tss_rw __attribute__((__aligned__(((1UL) << 12)))); + + +struct irq_stack { + char stack[(((1UL) << 12) << (2 + 1))]; +} __attribute__((__aligned__((((1UL) << 12) << (2 + 1))))); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_hardirq_stack_ptr; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct irq_stack *) hardirq_stack_ptr; +# 439 "./arch/x86/include/asm/processor.h" +struct 
fixed_percpu_data { + + + + + + char gs_base[40]; + unsigned long stack_canary; +}; + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_fixed_percpu_data; extern __attribute__((section(".data..percpu" "..first"))) __typeof__(struct fixed_percpu_data) fixed_percpu_data __attribute__((__externally_visible__)); +extern typeof(fixed_percpu_data) init_per_cpu__fixed_percpu_data; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long cpu_kernelmode_gs_base(int cpu) +{ + return (unsigned long)(*({ do { const void *__vpp_verify = (typeof((&(fixed_percpu_data.gs_base)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(fixed_percpu_data.gs_base)))) *)((&(fixed_percpu_data.gs_base))))); (typeof((typeof(*((&(fixed_percpu_data.gs_base)))) *)((&(fixed_percpu_data.gs_base))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); +} + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_irq_count; extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned int) irq_count; +extern void ignore_sysret(void); + + + +void save_fsgs_for_kvm(void); +# 482 "./arch/x86/include/asm/processor.h" +extern unsigned int fpu_kernel_xstate_size; +extern unsigned int fpu_user_xstate_size; + +struct perf_event; + +typedef struct { + unsigned long seg; +} mm_segment_t; + +struct thread_struct { + + struct desc_struct tls_array[3]; + + + + unsigned long sp; + + + + unsigned short es; + unsigned short ds; + unsigned short fsindex; + unsigned short gsindex; + + + + unsigned long fsbase; + unsigned long gsbase; +# 520 "./arch/x86/include/asm/processor.h" + struct perf_event *ptrace_bps[4]; + + unsigned long debugreg6; + + unsigned long ptrace_dr7; + + unsigned long cr2; + unsigned long trap_nr; + unsigned long error_code; + + + + + + struct io_bitmap *io_bitmap; + + + + + + + unsigned long iopl_emul; + + mm_segment_t addr_limit; + + unsigned int sig_on_uaccess_err:1; + + + struct fpu fpu; + + + + +}; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_thread_struct_whitelist(unsigned long *offset, + unsigned long *size) +{ + *offset = __builtin_offsetof(struct thread_struct, fpu.state); + *size = fpu_kernel_xstate_size; +} +# 572 "./arch/x86/include/asm/processor.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +native_load_sp0(unsigned long sp0) +{ + do { do { const void *__vpp_verify = (typeof((&(cpu_tss_rw.x86_tss.sp0)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_tss_rw.x86_tss.sp0)) { case 1: do { typedef typeof((cpu_tss_rw.x86_tss.sp0)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (sp0); (void)pto_tmp__; } switch (sizeof((cpu_tss_rw.x86_tss.sp0))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "qi" ((pto_T__)(sp0))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "re" ((pto_T__)(sp0))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((cpu_tss_rw.x86_tss.sp0)) pto_T__; if (0) { pto_T__ 
pto_tmp__; pto_tmp__ = (sp0); (void)pto_tmp__; } switch (sizeof((cpu_tss_rw.x86_tss.sp0))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "qi" ((pto_T__)(sp0))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "re" ((pto_T__)(sp0))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((cpu_tss_rw.x86_tss.sp0)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (sp0); (void)pto_tmp__; } switch (sizeof((cpu_tss_rw.x86_tss.sp0))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "qi" ((pto_T__)(sp0))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "re" ((pto_T__)(sp0))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((cpu_tss_rw.x86_tss.sp0)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (sp0); (void)pto_tmp__; } switch (sizeof((cpu_tss_rw.x86_tss.sp0))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "qi" ((pto_T__)(sp0))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "ri" ((pto_T__)(sp0))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tss_rw.x86_tss.sp0)) : "re" ((pto_T__)(sp0))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_swapgs(void) +{ + + asm volatile("swapgs" ::: "memory"); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long current_top_of_stack(void) +{ + + + + + + return ({ typeof(cpu_tss_rw.x86_tss.sp1) pfo_ret__; switch (sizeof(cpu_tss_rw.x86_tss.sp1)) { case 1: asm("mov" "b ""%%""gs"":" "%" "P1"",%0" : "=q" (pfo_ret__) : "p" (&(cpu_tss_rw.x86_tss.sp1))); break; case 2: asm("mov" "w ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(cpu_tss_rw.x86_tss.sp1))); break; case 4: asm("mov" "l ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(cpu_tss_rw.x86_tss.sp1))); break; case 8: asm("mov" "q ""%%""gs"":" "%" "P1"",%0" : "=r" (pfo_ret__) : "p" (&(cpu_tss_rw.x86_tss.sp1))); break; default: __bad_percpu_size(); } pfo_ret__; }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool on_thread_stack(void) +{ + return (unsigned long)(current_top_of_stack() - + current_stack_pointer) < (((1UL) << 12) << (2 + 1)); +} +# 614 "./arch/x86/include/asm/processor.h" +extern void release_thread(struct task_struct *); + +unsigned long get_wchan(struct task_struct *p); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void cpuid(unsigned int op, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + *eax = op; + *ecx = 0; + __cpuid(eax, ebx, ecx, edx); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpuid_count(unsigned int op, int count, + unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + *eax = op; + *ecx = count; + __cpuid(eax, ebx, ecx, edx); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpuid_eax(unsigned int op) +{ + unsigned int eax, ebx, ecx, edx; + + cpuid(op, &eax, &ebx, &ecx, &edx); + + return eax; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpuid_ebx(unsigned int op) +{ + unsigned int eax, ebx, ecx, edx; + + cpuid(op, &eax, &ebx, &ecx, &edx); + + return ebx; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpuid_ecx(unsigned int op) +{ + unsigned int eax, ebx, ecx, edx; + + cpuid(op, &eax, &ebx, &ecx, &edx); + + return ecx; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int cpuid_edx(unsigned int op) +{ + unsigned int eax, ebx, ecx, edx; + + cpuid(op, &eax, &ebx, &ecx, &edx); + + return edx; +} +# 695 "./arch/x86/include/asm/processor.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sync_core(void) +{ +# 728 "./arch/x86/include/asm/processor.h" + unsigned int tmp; + + asm volatile ( + "mov %%ss, %0\n\t" + "pushq %q0\n\t" + "pushq %%rsp\n\t" + "addq $8, (%%rsp)\n\t" + "pushfq\n\t" + "mov %%cs, %0\n\t" + "pushq %q0\n\t" + "pushq $1f\n\t" + "iretq\n\t" + "1:" + : "=&r" (tmp), "+r" (current_stack_pointer) : : "cc", "memory"); + +} + +extern void select_idle_routine(const struct cpuinfo_x86 *c); +extern void amd_e400_c1e_apic_setup(void); + +extern unsigned long boot_option_idle_override; + +enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, + IDLE_POLL}; + +extern void enable_sep_cpu(void); +extern int sysenter_setup(void); + + + +extern struct desc_ptr early_gdt_descr; + +extern void switch_to_new_gdt(int); +extern void load_direct_gdt(int); +extern void load_fixmap_gdt(int); +extern void load_percpu_segment(int); +extern void cpu_init(void); +extern void cr4_init(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_debugctlmsr(void) +{ + unsigned long debugctlmsr = 0; + + + + + + do { debugctlmsr = paravirt_read_msr(0x000001d9); } while (0); + + return debugctlmsr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_debugctlmsr(unsigned long debugctlmsr) +{ + + + + + wrmsrl(0x000001d9, debugctlmsr); +} + +extern void set_task_blockstep(struct task_struct *task, bool on); + + +extern int bootloader_type; +extern int bootloader_version; + +extern char ignore_fpu_irq; +# 814 "./arch/x86/include/asm/processor.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void prefetch(const void *x) +{ + asm volatile ("# ALT: oldnstr\n" "661:\n\t" "prefetcht0 %P1" 
"\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 0*32+25)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "prefetchnta %P1" "\n" "665""1" ":\n" ".popsection\n" : : "i" (0), "m" (*(const char *)x)) + + ; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void prefetchw(const void *x) +{ + asm volatile ("# ALT: oldnstr\n" "661:\n\t" "prefetcht0 %P1" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 6*32+ 8)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "prefetchw %P1" "\n" "665""1" ":\n" ".popsection\n" : : "i" (0), "m" (*(const char *)x)) + + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void spin_lock_prefetch(const void *x) +{ + prefetchw(x); +} +# 913 "./arch/x86/include/asm/processor.h" +extern unsigned long KSTK_ESP(struct task_struct *task); + + + +extern void start_thread(struct pt_regs *regs, unsigned long new_ip, + unsigned long new_sp); +# 933 "./arch/x86/include/asm/processor.h" +extern int get_tsc_mode(unsigned long adr); +extern int set_tsc_mode(unsigned int val); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_msr_misc_features_shadow; extern __attribute__((section(".data..percpu" ""))) __typeof__(u64) msr_misc_features_shadow; + + +extern u16 amd_get_nb_id(int cpu); +extern u32 amd_get_nodes_per_socket(void); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) +{ + uint32_t base, eax, signature[3]; + + for (base = 0x40000000; base < 0x40010000; base += 0x100) { + cpuid(base, &eax, &signature[0], &signature[1], &signature[2]); + + if (!memcmp(sig, signature, 12) && + (leaves == 0 || ((eax - base) >= leaves))) + return base; + } + + return 0; +} + +extern unsigned long arch_align_stack(unsigned long sp); +void free_init_pages(const char *what, unsigned long begin, unsigned long end); +extern void free_kernel_image_pages(const char *what, void *begin, void *end); + +void default_idle(void); + +bool xen_set_default_idle(void); + + + + +void stop_this_cpu(void *dummy); +void microcode_check(void); + +enum l1tf_mitigations { + L1TF_MITIGATION_OFF, + L1TF_MITIGATION_FLUSH_NOWARN, + L1TF_MITIGATION_FLUSH, + L1TF_MITIGATION_FLUSH_NOSMT, + L1TF_MITIGATION_FULL, + L1TF_MITIGATION_FULL_FORCE +}; + +extern enum l1tf_mitigations l1tf_mitigation; + +enum mds_mitigations { + MDS_MITIGATION_OFF, + MDS_MITIGATION_FULL, + MDS_MITIGATION_VMWERV, +}; +# 6 "./arch/x86/include/asm/cpufeature.h" 2 + + + + + + +enum cpuid_leafs +{ + CPUID_1_EDX = 0, + CPUID_8000_0001_EDX, + CPUID_8086_0001_EDX, + 
CPUID_LNX_1, + CPUID_1_ECX, + CPUID_C000_0001_EDX, + CPUID_8000_0001_ECX, + CPUID_LNX_2, + CPUID_LNX_3, + CPUID_7_0_EBX, + CPUID_D_1_EAX, + CPUID_LNX_4, + CPUID_7_1_EAX, + CPUID_8000_0008_EBX, + CPUID_6_EAX, + CPUID_8000_000A_EDX, + CPUID_7_ECX, + CPUID_8000_0007_EBX, + CPUID_7_EDX, +}; + + +extern const char * const x86_cap_flags[19*32]; +extern const char * const x86_power_flags[32]; +# 49 "./arch/x86/include/asm/cpufeature.h" +extern const char * const x86_bug_flags[1*32]; +# 141 "./arch/x86/include/asm/cpufeature.h" +extern void setup_clear_cpu_cap(unsigned int bit); +extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); +# 173 "./arch/x86/include/asm/cpufeature.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool _static_cpu_has(u16 bit) +{ + do { asm goto("1: jmp 6f\n" "2:\n" ".skip -(((5f-4f) - (2b-1b)) > 0) * " "((5f-4f) - (2b-1b)),0x90\n" "3:\n" ".section .altinstructions,\"a\"\n" " .long 1b - .\n" " .long 4f - .\n" " .word %P[always]\n" " .byte 3b - 1b\n" " .byte 5f - 4f\n" " .byte 3b - 2b\n" ".previous\n" ".section .altinstr_replacement,\"ax\"\n" "4: jmp %l[t_no]\n" "5:\n" ".previous\n" ".section .altinstructions,\"a\"\n" " .long 1b - .\n" " .long 0\n" " .word %P[feature]\n" " .byte 3b - 1b\n" " .byte 0\n" " .byte 0\n" ".previous\n" ".section .altinstr_aux,\"ax\"\n" "6:\n" " testb %[bitnum],%[cap_byte]\n" " jnz %l[t_yes]\n" " jmp %l[t_no]\n" ".previous\n" : : [feature] "i" (bit), [always] "i" (( 3*32+21)), [bitnum] "i" (1 << (bit & 7)), [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) : : t_yes, t_no); asm (""); } while (0) +# 210 "./arch/x86/include/asm/cpufeature.h" + ; +t_yes: + return true; +t_no: + return false; +} +# 54 "./arch/x86/include/asm/thread_info.h" 2 + + +struct thread_info { + unsigned long flags; + u32 status; +}; +# 182 "./arch/x86/include/asm/thread_info.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_within_stack_frames(const void * const stack, + const void * const stackend, + const void *obj, unsigned long len) +{ +# 214 "./arch/x86/include/asm/thread_info.h" + return NOT_STACK; + +} +# 238 "./arch/x86/include/asm/thread_info.h" +extern void arch_task_cache_init(void); +extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); +extern void arch_release_task_struct(struct task_struct *tsk); +extern void arch_setup_new_exec(void); +# 39 "./include/linux/thread_info.h" 2 +# 53 "./include/linux/thread_info.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_ti_thread_flag(struct thread_info *ti, int flag) +{ + set_bit(flag, (unsigned long *)&ti->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_ti_thread_flag(struct thread_info *ti, int flag) +{ + clear_bit(flag, (unsigned long *)&ti->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_ti_thread_flag(struct thread_info *ti, int flag, + bool value) +{ + if (value) + set_ti_thread_flag(ti, flag); + else + clear_ti_thread_flag(ti, flag); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) +{ + 
return test_and_set_bit(flag, (unsigned long *)&ti->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) +{ + return test_and_clear_bit(flag, (unsigned long *)&ti->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_ti_thread_flag(struct thread_info *ti, int flag) +{ + return test_bit(flag, (unsigned long *)&ti->flags); +} +# 112 "./include/linux/thread_info.h" +extern void __check_object_size(const void *ptr, unsigned long n, + bool to_user); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void check_object_size(const void *ptr, unsigned long n, + bool to_user) +{ + if (!__builtin_constant_p(n)) + __check_object_size(ptr, n, to_user); +} + + + + + + +extern void __attribute__((__error__("copy source size is too small"))) +__bad_copy_from(void); +extern void __attribute__((__error__("copy destination size is too small"))) +__bad_copy_to(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void copy_overflow(int size, unsigned long count) +{ + ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (217)); }); __warn_printk("Buffer overflow detected (%d < %lu)!\n", size, count); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (218)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/thread_info.h"), "i" (134), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (219)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (220)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (221)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) bool +check_copy_size(const void *addr, size_t bytes, bool is_source) +{ + int sz = __builtin_object_size(addr, 0); + if (__builtin_expect(!!(sz >= 0 && sz < bytes), 0)) { + if (!__builtin_constant_p(bytes)) + copy_overflow(sz, bytes); + else if (is_source) + __bad_copy_from(); + else + __bad_copy_to(); + return false; + } + if (({ int __ret_warn_on = !!(bytes > ((int)(~0U >> 1))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (222)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" 
"\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/thread_info.h"), "i" (150), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (223)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (224)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return false; + check_object_size(addr, bytes, is_source); + return true; +} +# 8 "./arch/x86/include/asm/preempt.h" 2 + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope___preempt_count; extern __attribute__((section(".data..percpu" ""))) __typeof__(int) __preempt_count; +# 24 "./arch/x86/include/asm/preempt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int preempt_count(void) +{ + return ({ typeof(__preempt_count) pfo_ret__; switch (sizeof(__preempt_count)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (__preempt_count)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; default: __bad_percpu_size(); } pfo_ret__; }) & ~0x80000000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void preempt_count_set(int pc) +{ + int old, new; + + do { + old = ({ typeof(__preempt_count) pfo_ret__; switch (sizeof(__preempt_count)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (__preempt_count)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; default: __bad_percpu_size(); } pfo_ret__; }); + new = (old & 0x80000000) | + (pc & ~0x80000000); + } while (({ typeof(__preempt_count) pco_ret__; typeof(__preempt_count) pco_old__ = (old); typeof(__preempt_count) pco_new__ = (new); switch (sizeof(__preempt_count)) { case 1: asm ("cmpxchgb %2, ""%%""gs"":" "%" "1" : "=a" (pco_ret__), "+m" (__preempt_count) : "q" (pco_new__), "0" (pco_old__) : "memory"); break; case 2: asm ("cmpxchgw %2, ""%%""gs"":" "%" "1" : "=a" (pco_ret__), "+m" (__preempt_count) : "r" (pco_new__), "0" (pco_old__) : "memory"); break; case 4: asm ("cmpxchgl %2, ""%%""gs"":" "%" "1" : "=a" (pco_ret__), "+m" (__preempt_count) : "r" (pco_new__), "0" (pco_old__) : "memory"); break; case 8: asm ("cmpxchgq %2, ""%%""gs"":" "%" "1" : "=a" (pco_ret__), "+m" (__preempt_count) : "r" (pco_new__), "0" (pco_old__) : "memory"); break; default: __bad_percpu_size(); } pco_ret__; }) != old); +} +# 58 "./arch/x86/include/asm/preempt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void set_preempt_need_resched(void) +{ + do { typedef typeof((__preempt_count)) pto_T__; if 
(0) { pto_T__ pto_tmp__; pto_tmp__ = (~0x80000000); (void)pto_tmp__; } switch (sizeof((__preempt_count))) { case 1: asm ("and" "b %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "qi" ((pto_T__)(~0x80000000))); break; case 2: asm ("and" "w %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pto_T__)(~0x80000000))); break; case 4: asm ("and" "l %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pto_T__)(~0x80000000))); break; case 8: asm ("and" "q %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "re" ((pto_T__)(~0x80000000))); break; default: __bad_percpu_size(); } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void clear_preempt_need_resched(void) +{ + do { typedef typeof((__preempt_count)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (0x80000000); (void)pto_tmp__; } switch (sizeof((__preempt_count))) { case 1: asm ("or" "b %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "qi" ((pto_T__)(0x80000000))); break; case 2: asm ("or" "w %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pto_T__)(0x80000000))); break; case 4: asm ("or" "l %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pto_T__)(0x80000000))); break; case 8: asm ("or" "q %1,""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "re" ((pto_T__)(0x80000000))); break; default: __bad_percpu_size(); } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool test_preempt_need_resched(void) +{ + return !(({ typeof(__preempt_count) pfo_ret__; switch (sizeof(__preempt_count)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (__preempt_count)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; default: __bad_percpu_size(); } pfo_ret__; }) & 0x80000000); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __preempt_count_add(int val) +{ + do { typedef typeof((__preempt_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(val) && ((val) == 1 || (val) == -1)) ? 
(int)(val) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (val); (void)pao_tmp__; } switch (sizeof((__preempt_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "qi" ((pao_T__)(val))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pao_T__)(val))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pao_T__)(val))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "re" ((pao_T__)(val))); break; default: __bad_percpu_size(); } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __preempt_count_sub(int val) +{ + do { typedef typeof((__preempt_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-val) && ((-val) == 1 || (-val) == -1)) ? (int)(-val) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-val); (void)pao_tmp__; } switch (sizeof((__preempt_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "qi" ((pao_T__)(-val))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pao_T__)(-val))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "ri" ((pao_T__)(-val))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((__preempt_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((__preempt_count)) : "re" ((pao_T__)(-val))); break; default: __bad_percpu_size(); } } while (0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool __preempt_count_dec_and_test(void) +{ + return ({ bool c; asm volatile ("decl" " " "%%""gs"":" "%" "[var]" "\n\t/* output condition code " "e" "*/\n" : [var] "+m" (__preempt_count), "=@cc" "e" (c) : : "memory"); c; }); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool should_resched(int preempt_offset) +{ + return __builtin_expect(!!(({ typeof(__preempt_count) pfo_ret__; switch (sizeof(__preempt_count)) 
{ case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (__preempt_count)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (__preempt_count)); break; default: __bad_percpu_size(); } pfo_ret__; }) == preempt_offset), 0); +} +# 79 "./include/linux/preempt.h" 2 +# 277 "./include/linux/preempt.h" +struct preempt_notifier; +# 293 "./include/linux/preempt.h" +struct preempt_ops { + void (*sched_in)(struct preempt_notifier *notifier, int cpu); + void (*sched_out)(struct preempt_notifier *notifier, + struct task_struct *next); +}; +# 306 "./include/linux/preempt.h" +struct preempt_notifier { + struct hlist_node link; + struct preempt_ops *ops; +}; + +void preempt_notifier_inc(void); +void preempt_notifier_dec(void); +void preempt_notifier_register(struct preempt_notifier *notifier); +void preempt_notifier_unregister(struct preempt_notifier *notifier); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void preempt_notifier_init(struct preempt_notifier *notifier, + struct preempt_ops *ops) +{ + INIT_HLIST_NODE(¬ifier->link); + notifier->ops = ops; +} +# 335 "./include/linux/preempt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void migrate_disable(void) +{ + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); +} +# 350 "./include/linux/preempt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void migrate_enable(void) +{ + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); +} +# 52 "./include/linux/spinlock.h" 2 + + + + + + +# 1 "./include/linux/bottom_half.h" 1 + + + + + + + +extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); +# 17 "./include/linux/bottom_half.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_bh_disable(void) +{ + __local_bh_disable_ip(({ __label__ __here; __here: (unsigned long)&&__here; }), (2 * (1UL << (0 + 8)))); +} + +extern void _local_bh_enable(void); +extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_bh_enable_ip(unsigned long ip) +{ + __local_bh_enable_ip(ip, (2 * (1UL << (0 + 8)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_bh_enable(void) +{ + __local_bh_enable_ip(({ __label__ __here; __here: (unsigned long)&&__here; }), (2 * (1UL << (0 + 8)))); +} +# 59 "./include/linux/spinlock.h" 2 + +# 1 "./arch/x86/include/generated/asm/mmiowb.h" 1 +# 1 "./include/asm-generic/mmiowb.h" 1 +# 1 "./arch/x86/include/generated/asm/mmiowb.h" 2 +# 61 "./include/linux/spinlock.h" 2 +# 83 "./include/linux/spinlock.h" +# 1 "./include/linux/spinlock_types.h" 1 +# 18 "./include/linux/spinlock_types.h" +# 1 "./include/linux/lockdep.h" 1 +# 13 "./include/linux/lockdep.h" +struct task_struct; +struct lockdep_map; + + +extern int prove_locking; +extern int lock_stat; + + + + + +enum 
lockdep_wait_type { + LD_WAIT_INV = 0, + + LD_WAIT_FREE, + LD_WAIT_SPIN, + + + LD_WAIT_CONFIG, + + + + LD_WAIT_SLEEP, + + LD_WAIT_MAX, +}; + + + + + +# 1 "./include/linux/debug_locks.h" 1 +# 9 "./include/linux/debug_locks.h" +struct task_struct; + +extern int debug_locks __attribute__((__section__(".data..read_mostly"))); +extern int debug_locks_silent __attribute__((__section__(".data..read_mostly"))); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int __debug_locks_off(void) +{ + return ({ typeof(&debug_locks) __ai_ptr = (&debug_locks); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = ((0)); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); }); +} + + + + +extern int debug_locks_off(void); +# 44 "./include/linux/debug_locks.h" + extern void locking_selftest(void); + + + + +struct task_struct; + + +extern void debug_show_all_locks(void); +extern void debug_show_held_locks(struct task_struct *task); +extern void debug_check_no_locks_freed(const void *from, unsigned long len); +extern void debug_check_no_locks_held(void); +# 45 "./include/linux/lockdep.h" 2 +# 1 "./include/linux/stacktrace.h" 1 + + + + + +# 1 "./arch/x86/include/generated/uapi/asm/errno.h" 1 +# 7 "./include/linux/stacktrace.h" 2 + +struct task_struct; +struct pt_regs; + + +void stack_trace_print(const unsigned long *trace, unsigned int nr_entries, + int spaces); +int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, + unsigned int nr_entries, int spaces); +unsigned int stack_trace_save(unsigned long *store, unsigned int size, + unsigned int skipnr); +unsigned int stack_trace_save_tsk(struct task_struct *task, + unsigned long *store, unsigned int size, + unsigned int skipnr); +unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, + unsigned int size, unsigned int skipnr); +unsigned int stack_trace_save_user(unsigned long *store, unsigned int size); +# 38 "./include/linux/stacktrace.h" +typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr, + bool reliable); +# 56 "./include/linux/stacktrace.h" +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs); +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task); +void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, + const struct pt_regs *regs); +# 82 "./include/linux/stacktrace.h" +int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, + unsigned int size); +# 46 "./include/linux/lockdep.h" 2 +# 71 "./include/linux/lockdep.h" +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + + +struct lock_class_key { + union { + struct hlist_node hash_entry; + struct lockdep_subclass_key subkeys[8UL]; + }; +}; + +extern struct lock_class_key __lockdep_no_validate__; + +struct lock_trace; + + + + + + + +struct lock_class 
{ + + + + struct hlist_node hash_entry; + + + + + + + struct list_head lock_entry; + + + + + + + struct list_head locks_after, locks_before; + + const struct lockdep_subclass_key *key; + unsigned int subclass; + unsigned int dep_gen_id; + + + + + unsigned long usage_mask; + const struct lock_trace *usage_traces[(1+2*4)]; + + + + + + int name_version; + const char *name; + + short wait_type_inner; + short wait_type_outer; + + + unsigned long contention_point[4]; + unsigned long contending_point[4]; + +} ; + + +struct lock_time { + s64 min; + s64 max; + s64 total; + unsigned long nr; +}; + +enum bounce_type { + bounce_acquired_write, + bounce_acquired_read, + bounce_contended_write, + bounce_contended_read, + nr_bounce_types, + + bounce_acquired = bounce_acquired_write, + bounce_contended = bounce_contended_write, +}; + +struct lock_class_stats { + unsigned long contention_point[4]; + unsigned long contending_point[4]; + struct lock_time read_waittime; + struct lock_time write_waittime; + struct lock_time read_holdtime; + struct lock_time write_holdtime; + unsigned long bounces[nr_bounce_types]; +}; + +struct lock_class_stats lock_stats(struct lock_class *class); +void clear_lock_stats(struct lock_class *class); + + + + + + +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class_cache[2]; + const char *name; + short wait_type_outer; + short wait_type_inner; + + int cpu; + unsigned long ip; + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_copy_map(struct lockdep_map *to, + struct lockdep_map *from) +{ + int i; + + *to = *from; +# 202 "./include/linux/lockdep.h" + for (i = 0; i < 2; i++) + to->class_cache[i] = ((void *)0); +} + + + + + +struct lock_list { + struct list_head entry; + struct lock_class *class; + struct lock_class *links_to; + const struct lock_trace *trace; + int distance; + + + + + + struct lock_list *parent; +}; +# 233 "./include/linux/lockdep.h" +struct lock_chain { + + unsigned int irq_context : 2, + depth : 6, + base : 24; + + struct hlist_node entry; + u64 chain_key; +}; + + + + + +struct held_lock { +# 262 "./include/linux/lockdep.h" + u64 prev_chain_key; + unsigned long acquire_ip; + struct lockdep_map *instance; + struct lockdep_map *nest_lock; + + u64 waittime_stamp; + u64 holdtime_stamp; + + + + + + + unsigned int class_idx:13; +# 289 "./include/linux/lockdep.h" + unsigned int irq_context:2; + unsigned int trylock:1; + + unsigned int read:2; + unsigned int check:1; + unsigned int hardirqs_off:1; + unsigned int references:12; + unsigned int pin_count; +}; + + + + +extern void lockdep_init(void); +extern void lockdep_reset(void); +extern void lockdep_reset_lock(struct lockdep_map *lock); +extern void lockdep_free_key_range(void *start, unsigned long size); +extern void lockdep_sys_exit(void); +extern void lockdep_set_selftest_task(struct task_struct *task); + +extern void lockdep_init_task(struct task_struct *task); +# 333 "./include/linux/lockdep.h" +extern void lockdep_register_key(struct lock_class_key *key); +extern void lockdep_unregister_key(struct lock_class_key *key); + + + + + + + +extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass, short inner, short outer); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +lockdep_init_map_wait(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int 
subclass, short inner)
+{
+ lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass)
+{
+ lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
+}
[Hunk truncated: several hundred lines of machine-generated preprocessor output omitted. This stretch of the preprocessed fixture is expanded kernel header text only: the remainder of ./include/linux/lockdep.h, the raw_spinlock_t/spinlock_t/rwlock_t definitions from spinlock_types.h and rwlock_types.h, the x86 and asm-generic qspinlock/qrwlock implementations, and the _raw_*/__raw_* wrappers of spinlock_api_smp.h and rwlock_api_smp.h, in which each acquire and release is bracketed by lockdep's lock_acquire()/lock_release() hooks. The hunk resumes below with the public spin_lock()/spin_unlock() API of ./include/linux/spinlock.h.]
+# 326 "./include/linux/spinlock.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+ return &lock->rlock;
+}
+# 351 "./include/linux/spinlock.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_lock(spinlock_t *lock)
+{
+ _raw_spin_lock(&lock->rlock);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_lock_bh(spinlock_t *lock)
+{
+ _raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_trylock(spinlock_t *lock)
+{
+ return (_raw_spin_trylock(&lock->rlock));
+}
+# 376 "./include/linux/spinlock.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_lock_irq(spinlock_t *lock)
+{
+ _raw_spin_lock_irq(&lock->rlock);
+}
+# 391 "./include/linux/spinlock.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_unlock(spinlock_t *lock)
+{
+ _raw_spin_unlock(&lock->rlock);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_unlock_bh(spinlock_t *lock)
+{
+ _raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_unlock_irq(spinlock_t *lock)
+{
+ _raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&lock->rlock, flags); } while (0);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_trylock_bh(spinlock_t *lock)
+{
+ return (_raw_spin_trylock_bh(&lock->rlock));
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_trylock_irq(spinlock_t *lock)
+{
+ return ({ do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0); (_raw_spin_trylock(&lock->rlock)) ? 1 : ({ do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); 0; }); });
+}
+# 444 "./include/linux/spinlock.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_is_locked(spinlock_t *lock)
+{
+ return queued_spin_is_locked(&(&lock->rlock)->raw_lock);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int spin_is_contended(spinlock_t *lock)
+{
+ return queued_spin_is_contended(&(&lock->rlock)->raw_lock);
+}
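The wrappers above are the call surface that a lock-state monitor observes in a preprocessed kernel translation unit. As a minimal, hypothetical sketch of the kind of defect such a fixture exists to exercise (the function `demo_double_unlock`, the lock `demo_lock`, and the `fail` flag are inventions for illustration, not part of the fixture):

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock); /* hypothetical lock, for illustration only */

/*
 * Double release on the error path: a monitor tracking demo_lock's region
 * steps unlocked -> locked at spin_lock(), back to unlocked at the first
 * spin_unlock(), and the second spin_unlock() on the fail path is the
 * transition a double-unlock checker reports.
 */
static int demo_double_unlock(int fail)
{
	spin_lock(&demo_lock);
	if (fail) {
		spin_unlock(&demo_lock); /* first release */
		/* error handling elided */
	}
	spin_unlock(&demo_lock); /* runs again when fail != 0 */
	return fail ? -1 : 0;
}
```

After preprocessing, the `spin_lock`/`spin_unlock` calls in a unit like this expand to exactly the inline wrappers shown in the hunk above, which is the form in which the analysis encounters them.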
[Hunk truncated: the expanded headers continue for several hundred more lines, none of them hand-written fixture code: the _atomic_dec_and_lock and bucket-spinlock declarations that close ./include/linux/spinlock.h, the wait-queue API of ./include/linux/wait.h, the wait-on-bit helpers of wait_bit.h, the device-number encoders of kdev_t.h, and the RCU read-side primitives and RCU-safe list operations of rcupdate.h, rcutree.h and rculist.h pulled in via ./include/linux/fs.h and dcache.h.]
+# 288 "./include/linux/rculist.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
list_splice_tail_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head->prev, head, sync); +} +# 483 "./include/linux/rculist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_del_rcu(struct hlist_node *n) +{ + __hlist_del(n); + do { do { extern void __compiletime_assert_245(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_245(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *) 0x122 + (0xdead000000000000UL))); } while (0); } while (0); +} +# 496 "./include/linux/rculist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_replace_rcu(struct hlist_node *old, + struct hlist_node *new) +{ + struct hlist_node *next = old->next; + + new->next = next; + do { do { extern void __compiletime_assert_246(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(new->pprev) == sizeof(char) || sizeof(new->pprev) == sizeof(short) || sizeof(new->pprev) == sizeof(int) || sizeof(new->pprev) == sizeof(long)) || sizeof(new->pprev) == sizeof(long long))) __compiletime_assert_246(); } while (0); do { *(volatile typeof(new->pprev) *)&(new->pprev) = (old->pprev); } while (0); } while (0); + do { uintptr_t _r_a_p__v = (uintptr_t)(new); ; if (__builtin_constant_p(new) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_247(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((*(struct hlist_node **)new->pprev)) == sizeof(char) || sizeof((*(struct hlist_node **)new->pprev)) == sizeof(short) || sizeof((*(struct hlist_node **)new->pprev)) == sizeof(int) || sizeof((*(struct hlist_node **)new->pprev)) == sizeof(long)) || sizeof((*(struct hlist_node **)new->pprev)) == sizeof(long long))) __compiletime_assert_247(); } while (0); do { *(volatile typeof((*(struct hlist_node **)new->pprev)) *)&((*(struct hlist_node **)new->pprev)) = ((typeof(*(struct hlist_node **)new->pprev))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_248(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(char) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(short) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(int) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(long)))) __compiletime_assert_248(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_249(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(char) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(short) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(int) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(long)) || sizeof(*&*(struct hlist_node **)new->pprev) == sizeof(long long))) __compiletime_assert_249(); } while (0); do { *(volatile typeof(*&*(struct hlist_node **)new->pprev) *)&(*&*(struct hlist_node **)new->pprev) = 
((typeof(*((typeof(*(struct hlist_node **)new->pprev))_r_a_p__v)) *)((typeof(*(struct hlist_node **)new->pprev))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + if (next) + do { do { extern void __compiletime_assert_250(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(new->next->pprev) == sizeof(char) || sizeof(new->next->pprev) == sizeof(short) || sizeof(new->next->pprev) == sizeof(int) || sizeof(new->next->pprev) == sizeof(long)) || sizeof(new->next->pprev) == sizeof(long long))) __compiletime_assert_250(); } while (0); do { *(volatile typeof(new->next->pprev) *)&(new->next->pprev) = (&new->next); } while (0); } while (0); + do { do { extern void __compiletime_assert_251(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(old->pprev) == sizeof(char) || sizeof(old->pprev) == sizeof(short) || sizeof(old->pprev) == sizeof(int) || sizeof(old->pprev) == sizeof(long)) || sizeof(old->pprev) == sizeof(long long))) __compiletime_assert_251(); } while (0); do { *(volatile typeof(old->pprev) *)&(old->pprev) = (((void *) 0x122 + (0xdead000000000000UL))); } while (0); } while (0); +} +# 519 "./include/linux/rculist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right) +{ + struct hlist_node *node1 = left->first; + struct hlist_node *node2 = right->first; + + do { uintptr_t _r_a_p__v = (uintptr_t)(node2); ; if (__builtin_constant_p(node2) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_252(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((left->first)) == sizeof(char) || sizeof((left->first)) == sizeof(short) || sizeof((left->first)) == sizeof(int) || sizeof((left->first)) == sizeof(long)) || sizeof((left->first)) == sizeof(long long))) __compiletime_assert_252(); } while (0); do { *(volatile typeof((left->first)) *)&((left->first)) = ((typeof(left->first))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_253(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&left->first) == sizeof(char) || sizeof(*&left->first) == sizeof(short) || sizeof(*&left->first) == sizeof(int) || sizeof(*&left->first) == sizeof(long)))) __compiletime_assert_253(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_254(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&left->first) == sizeof(char) || sizeof(*&left->first) == sizeof(short) || sizeof(*&left->first) == sizeof(int) || sizeof(*&left->first) == sizeof(long)) || sizeof(*&left->first) == sizeof(long long))) __compiletime_assert_254(); } while (0); do { *(volatile typeof(*&left->first) *)&(*&left->first) = ((typeof(*((typeof(left->first))_r_a_p__v)) *)((typeof(left->first))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + do { uintptr_t _r_a_p__v = (uintptr_t)(node1); ; if (__builtin_constant_p(node1) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_255(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((right->first)) == sizeof(char) || sizeof((right->first)) == sizeof(short) || sizeof((right->first)) == sizeof(int) || sizeof((right->first)) == 
sizeof(long)) || sizeof((right->first)) == sizeof(long long))) __compiletime_assert_255(); } while (0); do { *(volatile typeof((right->first)) *)&((right->first)) = ((typeof(right->first))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_256(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&right->first) == sizeof(char) || sizeof(*&right->first) == sizeof(short) || sizeof(*&right->first) == sizeof(int) || sizeof(*&right->first) == sizeof(long)))) __compiletime_assert_256(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_257(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&right->first) == sizeof(char) || sizeof(*&right->first) == sizeof(short) || sizeof(*&right->first) == sizeof(int) || sizeof(*&right->first) == sizeof(long)) || sizeof(*&right->first) == sizeof(long long))) __compiletime_assert_257(); } while (0); do { *(volatile typeof(*&right->first) *)&(*&right->first) = ((typeof(*((typeof(right->first))_r_a_p__v)) *)((typeof(right->first))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + do { do { extern void __compiletime_assert_258(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(node2->pprev) == sizeof(char) || sizeof(node2->pprev) == sizeof(short) || sizeof(node2->pprev) == sizeof(int) || sizeof(node2->pprev) == sizeof(long)) || sizeof(node2->pprev) == sizeof(long long))) __compiletime_assert_258(); } while (0); do { *(volatile typeof(node2->pprev) *)&(node2->pprev) = (&left->first); } while (0); } while (0); + do { do { extern void __compiletime_assert_259(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(node1->pprev) == sizeof(char) || sizeof(node1->pprev) == sizeof(short) || sizeof(node1->pprev) == sizeof(int) || sizeof(node1->pprev) == sizeof(long)) || sizeof(node1->pprev) == sizeof(long long))) __compiletime_assert_259(); } while (0); do { *(volatile typeof(node1->pprev) *)&(node1->pprev) = (&right->first); } while (0); } while (0); +} +# 556 "./include/linux/rculist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_head_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *first = h->first; + + n->next = first; + do { do { extern void __compiletime_assert_260(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_260(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&h->first); } while (0); } while (0); + do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_261(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(h)->first))))) == sizeof(long long))) 
__compiletime_assert_261(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)(&(h)->first))))) *)&(((*((struct hlist_node **)(&(h)->first))))) = ((typeof((*((struct hlist_node **)(&(h)->first)))))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_262(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(long)))) __compiletime_assert_262(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_263(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)(&(h)->first)))) == sizeof(long long))) __compiletime_assert_263(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)(&(h)->first)))) *)&(*&(*((struct hlist_node **)(&(h)->first)))) = ((typeof(*((typeof((*((struct hlist_node **)(&(h)->first)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(h)->first)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + if (first) + do { do { extern void __compiletime_assert_264(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(first->pprev) == sizeof(char) || sizeof(first->pprev) == sizeof(short) || sizeof(first->pprev) == sizeof(int) || sizeof(first->pprev) == sizeof(long)) || sizeof(first->pprev) == sizeof(long long))) __compiletime_assert_264(); } while (0); do { *(volatile typeof(first->pprev) *)&(first->pprev) = (&n->next); } while (0); } while (0); +} +# 587 "./include/linux/rculist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_tail_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *i, *last = ((void *)0); + + + for (i = h->first; i; i = i->next) + last = i; + + if (last) { + n->next = last->next; + do { do { extern void __compiletime_assert_265(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_265(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&last->next); } while (0); } while (0); + do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_266(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(long long))) 
__compiletime_assert_266(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)(&(last)->next))))) *)&(((*((struct hlist_node **)(&(last)->next))))) = ((typeof((*((struct hlist_node **)(&(last)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_267(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)))) __compiletime_assert_267(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_268(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long long))) __compiletime_assert_268(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)(&(last)->next)))) *)&(*&(*((struct hlist_node **)(&(last)->next)))) = ((typeof(*((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + } else { + hlist_add_head_rcu(n, h); + } +} +# 623 "./include/linux/rculist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_before_rcu(struct hlist_node *n, + struct hlist_node *next) +{ + do { do { extern void __compiletime_assert_269(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_269(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (next->pprev); } while (0); } while (0); + n->next = next; + do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_270(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(char) || sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(short) || sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(int) || sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)((n)->pprev))))) == sizeof(long long))) __compiletime_assert_270(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)((n)->pprev))))) *)&(((*((struct hlist_node **)((n)->pprev))))) = ((typeof((*((struct hlist_node **)((n)->pprev)))))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_271(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(short) || sizeof(*&(*((struct hlist_node 
**)((n)->pprev)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(long)))) __compiletime_assert_271(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_272(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)((n)->pprev)))) == sizeof(long long))) __compiletime_assert_272(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)((n)->pprev)))) *)&(*&(*((struct hlist_node **)((n)->pprev)))) = ((typeof(*((typeof((*((struct hlist_node **)((n)->pprev)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)((n)->pprev)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + do { do { extern void __compiletime_assert_273(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(next->pprev) == sizeof(char) || sizeof(next->pprev) == sizeof(short) || sizeof(next->pprev) == sizeof(int) || sizeof(next->pprev) == sizeof(long)) || sizeof(next->pprev) == sizeof(long long))) __compiletime_assert_273(); } while (0); do { *(volatile typeof(next->pprev) *)&(next->pprev) = (&n->next); } while (0); } while (0); +} +# 650 "./include/linux/rculist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_add_behind_rcu(struct hlist_node *n, + struct hlist_node *prev) +{ + n->next = prev->next; + do { do { extern void __compiletime_assert_274(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_274(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&prev->next); } while (0); } while (0); + do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_275(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(prev)->next))))) == sizeof(long long))) __compiletime_assert_275(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)(&(prev)->next))))) *)&(((*((struct hlist_node **)(&(prev)->next))))) = ((typeof((*((struct hlist_node **)(&(prev)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_276(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(long)))) __compiletime_assert_276(); } 
while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_277(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)(&(prev)->next)))) == sizeof(long long))) __compiletime_assert_277(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)(&(prev)->next)))) *)&(*&(*((struct hlist_node **)(&(prev)->next)))) = ((typeof(*((typeof((*((struct hlist_node **)(&(prev)->next)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(prev)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + if (n->next) + do { do { extern void __compiletime_assert_278(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->next->pprev) == sizeof(char) || sizeof(n->next->pprev) == sizeof(short) || sizeof(n->next->pprev) == sizeof(int) || sizeof(n->next->pprev) == sizeof(long)) || sizeof(n->next->pprev) == sizeof(long long))) __compiletime_assert_278(); } while (0); do { *(volatile typeof(n->next->pprev) *)&(n->next->pprev) = (&n->next); } while (0); } while (0); +} +# 8 "./include/linux/dcache.h" 2 +# 1 "./include/linux/rculist_bl.h" 1 + + + + + + + +# 1 "./include/linux/list_bl.h" 1 + + + + + +# 1 "./include/linux/bit_spinlock.h" 1 +# 16 "./include/linux/bit_spinlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bit_spin_lock(int bitnum, unsigned long *addr) +{ + + + + + + + + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); + + while (__builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 0)) { + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); + do { + cpu_relax(); + } while (test_bit(bitnum, addr)); + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); + } + + (void)0; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bit_spin_trylock(int bitnum, unsigned long *addr) +{ + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); + + if (__builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 0)) { + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); + return 0; + } + + (void)0; + return 1; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bit_spin_unlock(int bitnum, unsigned long *addr) +{ + + do { if (__builtin_expect(!!(!test_bit(bitnum, addr)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (279)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (60), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - 
.\n\t" ".popsection\n\t" : : "i" (280)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + + clear_bit_unlock(bitnum, addr); + + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); + (void)0; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __bit_spin_unlock(int bitnum, unsigned long *addr) +{ + + do { if (__builtin_expect(!!(!test_bit(bitnum, addr)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (281)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (77), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (282)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + + __clear_bit_unlock(bitnum, addr); + + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); + (void)0; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bit_spin_is_locked(int bitnum, unsigned long *addr) +{ + + return test_bit(bitnum, addr); + + + + + +} +# 7 "./include/linux/list_bl.h" 2 +# 34 "./include/linux/list_bl.h" +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; +}; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) +{ + h->next = ((void *)0); + h->pprev = ((void *)0); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hlist_bl_unhashed(const struct hlist_bl_node *h) +{ + return !h->pprev; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h) +{ + return (struct hlist_bl_node *) + ((unsigned long)h->first & ~1UL); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_set_first(struct hlist_bl_head *h, + struct hlist_bl_node *n) +{ + do { if (__builtin_expect(!!((unsigned long)n & 1UL), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (283)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/list_bl.h"), "i" (66), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (284)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + do { if 
(__builtin_expect(!!(((unsigned long)h->first & 1UL) != 1UL), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (285)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/list_bl.h"), "i" (67), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (286)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0) + ; + h->first = (struct hlist_bl_node *)((unsigned long)n | 1UL); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hlist_bl_empty(const struct hlist_bl_head *h) +{ + return !((unsigned long)({ do { extern void __compiletime_assert_287(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(h->first) == sizeof(char) || sizeof(h->first) == sizeof(short) || sizeof(h->first) == sizeof(int) || sizeof(h->first) == sizeof(long)) || sizeof(h->first) == sizeof(long long))) __compiletime_assert_287(); } while (0); ({ typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->first))) __x = (*(const volatile typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->first))) *)&(h->first)); do { } while (0); (typeof(h->first))__x; }); }) & ~1UL); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_add_head(struct hlist_bl_node *n, + struct hlist_bl_head *h) +{ + struct hlist_bl_node *first = hlist_bl_first(h); + + n->next = first; + if (first) + first->pprev = &n->next; + n->pprev = &h->first; + hlist_bl_set_first(h, n); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_add_before(struct hlist_bl_node *n, + struct hlist_bl_node *next) +{ + struct hlist_bl_node **pprev = next->pprev; + + n->pprev = pprev; + n->next = next; + next->pprev = &n->next; + + + do { do { extern void __compiletime_assert_288(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*pprev) == sizeof(char) || sizeof(*pprev) == sizeof(short) || sizeof(*pprev) == sizeof(int) || sizeof(*pprev) == sizeof(long)) || sizeof(*pprev) == sizeof(long long))) __compiletime_assert_288(); } while (0); do { *(volatile typeof(*pprev) *)&(*pprev) = ((struct hlist_bl_node *) ((uintptr_t)n | ((uintptr_t)*pprev & 1UL))); } while (0); } while (0) + + ; +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_add_behind(struct hlist_bl_node *n, + struct hlist_bl_node *prev) +{ + n->next = prev->next; + n->pprev = &prev->next; + prev->next = n; + + if (n->next) + n->next->pprev = &n->next; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __hlist_bl_del(struct hlist_bl_node *n) +{ + struct hlist_bl_node *next = n->next; + struct hlist_bl_node **pprev = n->pprev; + + do { if (__builtin_expect(!!((unsigned long)n & 1UL), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (289)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/list_bl.h"), "i" (120), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (290)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + + do { do { extern void __compiletime_assert_291(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*pprev) == sizeof(char) || sizeof(*pprev) == sizeof(short) || sizeof(*pprev) == sizeof(int) || sizeof(*pprev) == sizeof(long)) || sizeof(*pprev) == sizeof(long long))) __compiletime_assert_291(); } while (0); do { *(volatile typeof(*pprev) *)&(*pprev) = ((struct hlist_bl_node *) ((unsigned long)next | ((unsigned long)*pprev & 1UL))); } while (0); } while (0) + + + ; + if (next) + next->pprev = pprev; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_del(struct hlist_bl_node *n) +{ + __hlist_bl_del(n); + n->next = ((void *) 0x100 + (0xdead000000000000UL)); + n->pprev = ((void *) 0x122 + (0xdead000000000000UL)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_del_init(struct hlist_bl_node *n) +{ + if (!hlist_bl_unhashed(n)) { + __hlist_bl_del(n); + INIT_HLIST_BL_NODE(n); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_lock(struct hlist_bl_head *b) +{ + bit_spin_lock(0, (unsigned long *)b); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_unlock(struct hlist_bl_head *b) +{ + __bit_spin_unlock(0, (unsigned long *)b); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hlist_bl_is_locked(struct hlist_bl_head *b) +{ + return bit_spin_is_locked(0, (unsigned long *)b); +} +# 9 "./include/linux/rculist_bl.h" 2 + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_set_first_rcu(struct hlist_bl_head *h, + struct hlist_bl_node *n) +{ + do { if (__builtin_expect(!!((unsigned long)n & 1UL), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (292)); }); do { asm 
volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/rculist_bl.h"), "i" (14), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (293)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + do { if (__builtin_expect(!!(((unsigned long)h->first & 1UL) != 1UL), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (294)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/rculist_bl.h"), "i" (15), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (295)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0) + ; + do { uintptr_t _r_a_p__v = (uintptr_t)((struct hlist_bl_node *)((unsigned long)n | 1UL)); ; if (__builtin_constant_p((struct hlist_bl_node *)((unsigned long)n | 1UL)) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_296(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((h->first)) == sizeof(char) || sizeof((h->first)) == sizeof(short) || sizeof((h->first)) == sizeof(int) || sizeof((h->first)) == sizeof(long)) || sizeof((h->first)) == sizeof(long long))) __compiletime_assert_296(); } while (0); do { *(volatile typeof((h->first)) *)&((h->first)) = ((typeof(h->first))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_297(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&h->first) == sizeof(char) || sizeof(*&h->first) == sizeof(short) || sizeof(*&h->first) == sizeof(int) || sizeof(*&h->first) == sizeof(long)))) __compiletime_assert_297(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_298(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&h->first) == sizeof(char) || sizeof(*&h->first) == sizeof(short) || sizeof(*&h->first) == sizeof(int) || sizeof(*&h->first) == sizeof(long)) || sizeof(*&h->first) == sizeof(long long))) __compiletime_assert_298(); } while (0); do { *(volatile typeof(*&h->first) *)&(*&h->first) = ((typeof(*((typeof(h->first))_r_a_p__v)) *)((typeof(h->first))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h) +{ + return (struct hlist_bl_node *) + ((unsigned long)({ typeof(*(h->first)) *________p1 = (typeof(*(h->first)) *)({ do { extern void __compiletime_assert_299(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((h->first)) == sizeof(char) 
|| sizeof((h->first)) == sizeof(short) || sizeof((h->first)) == sizeof(int) || sizeof((h->first)) == sizeof(long)) || sizeof((h->first)) == sizeof(long long))) __compiletime_assert_299(); } while (0); ({ typeof( _Generic(((h->first)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((h->first)))) __x = (*(const volatile typeof( _Generic(((h->first)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((h->first)))) *)&((h->first))); do { } while (0); (typeof((h->first)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((hlist_bl_is_locked(h)) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rculist_bl.h", 24, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(h->first)) *)(________p1)); }) & ~1UL); +} +# 46 "./include/linux/rculist_bl.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_del_rcu(struct hlist_bl_node *n) +{ + __hlist_bl_del(n); + n->pprev = ((void *) 0x122 + (0xdead000000000000UL)); +} +# 71 "./include/linux/rculist_bl.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_bl_add_head_rcu(struct hlist_bl_node *n, + struct hlist_bl_head *h) +{ + struct hlist_bl_node *first; + + + first = hlist_bl_first(h); + + n->next = first; + if (first) + first->pprev = &n->next; + n->pprev = &h->first; + + + hlist_bl_set_first_rcu(h, n); +} +# 9 "./include/linux/dcache.h" 2 + +# 1 "./include/linux/seqlock.h" 1 +# 64 "./include/linux/seqlock.h" +typedef struct seqcount { + unsigned sequence; + + struct lockdep_map dep_map; + +} seqcount_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __seqcount_init(seqcount_t *s, const char *name, + struct lock_class_key *key) +{ + + + + lockdep_init_map(&s->dep_map, name, key, 0); + s->sequence = 0; +} +# 91 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void seqcount_lockdep_reader_access(const seqcount_t *s) +{ + seqcount_t *l = (seqcount_t *)s; + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + lock_acquire(&l->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); + lock_release(&l->dep_map, (unsigned long)__builtin_return_address(0)); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); 
trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} +# 124 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned __read_seqcount_begin(const seqcount_t *s) +{ + unsigned ret; + +repeat: + ret = ({ do { extern void __compiletime_assert_300(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(s->sequence) == sizeof(char) || sizeof(s->sequence) == sizeof(short) || sizeof(s->sequence) == sizeof(int) || sizeof(s->sequence) == sizeof(long)) || sizeof(s->sequence) == sizeof(long long))) __compiletime_assert_300(); } while (0); ({ typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) __x = (*(const volatile typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) *)&(s->sequence)); do { } while (0); (typeof(s->sequence))__x; }); }); + if (__builtin_expect(!!(ret & 1), 0)) { + cpu_relax(); + goto repeat; + } + kcsan_atomic_next(1000); + return ret; +} +# 147 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned raw_read_seqcount(const seqcount_t *s) +{ + unsigned ret = ({ do { extern void __compiletime_assert_301(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(s->sequence) == sizeof(char) || sizeof(s->sequence) == sizeof(short) || sizeof(s->sequence) == sizeof(int) || sizeof(s->sequence) == sizeof(long)) || sizeof(s->sequence) == sizeof(long long))) __compiletime_assert_301(); } while (0); ({ typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) __x = (*(const volatile typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) *)&(s->sequence)); do { } while (0); (typeof(s->sequence))__x; }); }); + __asm__ __volatile__("": : :"memory"); + kcsan_atomic_next(1000); + return ret; +} +# 164 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) unsigned raw_read_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = __read_seqcount_begin(s); + __asm__ __volatile__("": : :"memory"); + return ret; +} +# 180 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned read_seqcount_begin(const seqcount_t *s) +{ + seqcount_lockdep_reader_access(s); + return raw_read_seqcount_begin(s); +} +# 200 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned raw_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = ({ do { extern void __compiletime_assert_302(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(s->sequence) == sizeof(char) || sizeof(s->sequence) == sizeof(short) || sizeof(s->sequence) == sizeof(int) || sizeof(s->sequence) == sizeof(long)) || sizeof(s->sequence) == sizeof(long long))) __compiletime_assert_302(); } while (0); ({ typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) __x = (*(const volatile typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) *)&(s->sequence)); do { } while (0); (typeof(s->sequence))__x; }); }); + __asm__ __volatile__("": : :"memory"); + kcsan_atomic_next(1000); + return ret & ~1; +} +# 222 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __read_seqcount_retry(const seqcount_t *s, unsigned start) +{ + kcsan_atomic_next(0); + return __builtin_expect(!!(({ do { extern void __compiletime_assert_303(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(s->sequence) == sizeof(char) || sizeof(s->sequence) == sizeof(short) || sizeof(s->sequence) == sizeof(int) || sizeof(s->sequence) == sizeof(long)) || sizeof(s->sequence) == sizeof(long long))) __compiletime_assert_303(); } while (0); ({ typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) __x = (*(const volatile typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) 
*)&(s->sequence)); do { } while (0); (typeof(s->sequence))__x; }); }) != start), 0); +} +# 238 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int read_seqcount_retry(const seqcount_t *s, unsigned start) +{ + __asm__ __volatile__("": : :"memory"); + return __read_seqcount_retry(s, start); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void raw_write_seqcount_begin(seqcount_t *s) +{ + kcsan_nestable_atomic_begin(); + s->sequence++; + __asm__ __volatile__("": : :"memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void raw_write_seqcount_end(seqcount_t *s) +{ + __asm__ __volatile__("": : :"memory"); + s->sequence++; + kcsan_nestable_atomic_end(); +} +# 301 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void raw_write_seqcount_barrier(seqcount_t *s) +{ + kcsan_nestable_atomic_begin(); + s->sequence++; + __asm__ __volatile__("": : :"memory"); + s->sequence++; + kcsan_nestable_atomic_end(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int raw_read_seqcount_latch(seqcount_t *s) +{ + + int seq = ({ do { extern void __compiletime_assert_304(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(s->sequence) == sizeof(char) || sizeof(s->sequence) == sizeof(short) || sizeof(s->sequence) == sizeof(int) || sizeof(s->sequence) == sizeof(long)) || sizeof(s->sequence) == sizeof(long long))) __compiletime_assert_304(); } while (0); ({ typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) __x = (*(const volatile typeof( _Generic((s->sequence), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (s->sequence))) *)&(s->sequence)); do { } while (0); (typeof(s->sequence))__x; }); }); + return seq; +} +# 394 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void raw_write_seqcount_latch(seqcount_t *s) +{ + __asm__ __volatile__("": : :"memory"); + s->sequence++; + __asm__ __volatile__("": : :"memory"); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqcount_begin_nested(seqcount_t *s, int subclass) +{ + raw_write_seqcount_begin(s); + lock_acquire(&s->dep_map, subclass, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqcount_begin(seqcount_t *s) +{ + write_seqcount_begin_nested(s, 0); +} + 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqcount_end(seqcount_t *s) +{ + lock_release(&s->dep_map, (unsigned long)__builtin_return_address(0)); + raw_write_seqcount_end(s); +} +# 429 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqcount_invalidate(seqcount_t *s) +{ + __asm__ __volatile__("": : :"memory"); + kcsan_nestable_atomic_begin(); + s->sequence+=2; + kcsan_nestable_atomic_end(); +} + +typedef struct { + struct seqcount seqcount; + spinlock_t lock; +} seqlock_t; +# 464 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned read_seqbegin(const seqlock_t *sl) +{ + unsigned ret = read_seqcount_begin(&sl->seqcount); + + kcsan_atomic_next(0); + kcsan_flat_atomic_begin(); + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned read_seqretry(const seqlock_t *sl, unsigned start) +{ + + + + + kcsan_flat_atomic_end(); + + return read_seqcount_retry(&sl->seqcount, start); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqlock(seqlock_t *sl) +{ + spin_lock(&sl->lock); + write_seqcount_begin(&sl->seqcount); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_sequnlock(seqlock_t *sl) +{ + write_seqcount_end(&sl->seqcount); + spin_unlock(&sl->lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqlock_bh(seqlock_t *sl) +{ + spin_lock_bh(&sl->lock); + write_seqcount_begin(&sl->seqcount); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_sequnlock_bh(seqlock_t *sl) +{ + write_seqcount_end(&sl->seqcount); + spin_unlock_bh(&sl->lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_seqlock_irq(seqlock_t *sl) +{ + spin_lock_irq(&sl->lock); + write_seqcount_begin(&sl->seqcount); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_sequnlock_irq(seqlock_t *sl) +{ + write_seqcount_end(&sl->seqcount); + spin_unlock_irq(&sl->lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __write_seqlock_irqsave(seqlock_t *sl) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&sl->lock)); } while (0); } while (0); + write_seqcount_begin(&sl->seqcount); + return flags; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) +{ + write_seqcount_end(&sl->seqcount); + spin_unlock_irqrestore(&sl->lock, flags); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_seqlock_excl(seqlock_t *sl) +{ + spin_lock(&sl->lock); +} + +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_sequnlock_excl(seqlock_t *sl) +{ + spin_unlock(&sl->lock); +} +# 569 "./include/linux/seqlock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_seqbegin_or_lock(seqlock_t *lock, int *seq) +{ + if (!(*seq & 1)) + *seq = read_seqbegin(lock); + else + read_seqlock_excl(lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int need_seqretry(seqlock_t *lock, int seq) +{ + return !(seq & 1) && read_seqretry(lock, seq); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void done_seqretry(seqlock_t *lock, int seq) +{ + if (seq & 1) + read_sequnlock_excl(lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_seqlock_excl_bh(seqlock_t *sl) +{ + spin_lock_bh(&sl->lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_sequnlock_excl_bh(seqlock_t *sl) +{ + spin_unlock_bh(&sl->lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_seqlock_excl_irq(seqlock_t *sl) +{ + spin_lock_irq(&sl->lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void read_sequnlock_excl_irq(seqlock_t *sl) +{ + spin_unlock_irq(&sl->lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&sl->lock)); } while (0); } while (0); + return flags; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) +{ + spin_unlock_irqrestore(&sl->lock, flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) +{ + unsigned long flags = 0; + + if (!(*seq & 1)) + *seq = read_seqbegin(lock); + else + do { flags = __read_seqlock_excl_irqsave(lock); } while (0); + + return flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) +{ + if (seq & 1) + read_sequnlock_excl_irqrestore(lock, flags); +} +# 11 "./include/linux/dcache.h" 2 + + +# 1 "./include/linux/lockref.h" 1 +# 19 "./include/linux/lockref.h" +# 1 "./include/generated/bounds.h" 1 +# 20 "./include/linux/lockref.h" 2 + + + + + +struct lockref { + union { + + + + struct { + spinlock_t lock; + int count; + }; + }; +}; + +extern void lockref_get(struct lockref *); +extern int lockref_put_return(struct lockref *); +extern int lockref_get_not_zero(struct lockref *); +extern int lockref_put_not_zero(struct lockref *); +extern int lockref_get_or_lock(struct lockref *); +extern int lockref_put_or_lock(struct lockref *); + +extern void 
lockref_mark_dead(struct lockref *); +extern int lockref_get_not_dead(struct lockref *); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __lockref_is_dead(const struct lockref *l) +{ + return ((int)l->count < 0); +} +# 14 "./include/linux/dcache.h" 2 +# 1 "./include/linux/stringhash.h" 1 + + + + + + +# 1 "./include/linux/hash.h" 1 + + + + + +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 7 "./include/linux/hash.h" 2 +# 60 "./include/linux/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __hash_32_generic(u32 val) +{ + return val * 0x61C88647; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 hash_32_generic(u32 val, unsigned int bits) +{ + + return __hash_32_generic(val) >> (32 - bits); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u32 hash_64_generic(u64 val, unsigned int bits) +{ + + + return val * 0x61C8864680B583EBull >> (64 - bits); + + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 hash_ptr(const void *ptr, unsigned int bits) +{ + return hash_64_generic((unsigned long)ptr, bits); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 hash32_ptr(const void *ptr) +{ + unsigned long val = (unsigned long)ptr; + + + val ^= (val >> 32); + + return (u32)val; +} +# 8 "./include/linux/stringhash.h" 2 +# 42 "./include/linux/stringhash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +partial_name_hash(unsigned long c, unsigned long prevhash) +{ + return (prevhash + (c << 4) + (c >> 4)) * 11; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int end_name_hash(unsigned long hash) +{ + return hash_64_generic(hash, 32); +} +# 66 "./include/linux/stringhash.h" +extern unsigned int __attribute__((__pure__)) full_name_hash(const void *salt, const char *, unsigned int); +# 77 "./include/linux/stringhash.h" +extern u64 __attribute__((__pure__)) hashlen_string(const void *salt, const char *name); +# 15 "./include/linux/dcache.h" 2 + + +struct path; +struct vfsmount; +# 47 "./include/linux/dcache.h" +struct qstr { + union { + struct { + u32 hash; u32 len; + }; + u64 hash_len; + }; + const unsigned char *name; +}; + + + +extern const struct qstr empty_name; +extern const struct qstr slash_name; + +struct dentry_stat_t { + long nr_dentry; + long nr_unused; + long age_limit; + long want_pages; + long nr_negative; + long dummy; +}; +extern struct dentry_stat_t dentry_stat; +# 89 "./include/linux/dcache.h" +struct dentry { + + unsigned int d_flags; + seqcount_t d_seq; + struct hlist_bl_node d_hash; + struct dentry *d_parent; + struct qstr d_name; + struct inode *d_inode; + + unsigned char d_iname[32]; + + + struct lockref d_lockref; + const struct dentry_operations *d_op; + struct super_block *d_sb; + unsigned long d_time; + void *d_fsdata; + + union { + struct list_head d_lru; + wait_queue_head_t *d_wait; + }; + struct list_head d_child; + struct list_head d_subdirs; + + + + union { + struct hlist_node d_alias; + struct hlist_bl_node 
d_in_lookup_hash; + struct callback_head d_rcu; + } d_u; +} __attribute__((__designated_init__)); + + + + + + + +enum dentry_d_lock_class +{ + DENTRY_D_LOCK_NORMAL, + DENTRY_D_LOCK_NESTED +}; + +struct dentry_operations { + int (*d_revalidate)(struct dentry *, unsigned int); + int (*d_weak_revalidate)(struct dentry *, unsigned int); + int (*d_hash)(const struct dentry *, struct qstr *); + int (*d_compare)(const struct dentry *, + unsigned int, const char *, const struct qstr *); + int (*d_delete)(const struct dentry *); + int (*d_init)(struct dentry *); + void (*d_release)(struct dentry *); + void (*d_prune)(struct dentry *); + void (*d_iput)(struct dentry *, struct inode *); + char *(*d_dname)(struct dentry *, char *, int); + struct vfsmount *(*d_automount)(struct path *); + int (*d_manage)(const struct path *, bool); + struct dentry *(*d_real)(struct dentry *, const struct inode *); +} __attribute__((__aligned__((1 << (6))))); +# 223 "./include/linux/dcache.h" +extern seqlock_t rename_lock; + + + + +extern void d_instantiate(struct dentry *, struct inode *); +extern void d_instantiate_new(struct dentry *, struct inode *); +extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); +extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); +extern void __d_drop(struct dentry *dentry); +extern void d_drop(struct dentry *dentry); +extern void d_delete(struct dentry *); +extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op); + + +extern struct dentry * d_alloc(struct dentry *, const struct qstr *); +extern struct dentry * d_alloc_anon(struct super_block *); +extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, + wait_queue_head_t *); +extern struct dentry * d_splice_alias(struct inode *, struct dentry *); +extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); +extern struct dentry * d_exact_alias(struct dentry *, struct inode *); +extern struct dentry *d_find_any_alias(struct inode *inode); +extern struct dentry * d_obtain_alias(struct inode *); +extern struct dentry * d_obtain_root(struct inode *); +extern void shrink_dcache_sb(struct super_block *); +extern void shrink_dcache_parent(struct dentry *); +extern void shrink_dcache_for_umount(struct super_block *); +extern void d_invalidate(struct dentry *); + + +extern struct dentry * d_make_root(struct inode *); + + +extern void d_genocide(struct dentry *); + +extern void d_tmpfile(struct dentry *, struct inode *); + +extern struct dentry *d_find_alias(struct inode *); +extern void d_prune_aliases(struct inode *); + + +extern int path_has_submounts(const struct path *); + + + + +extern void d_rehash(struct dentry *); + +extern void d_add(struct dentry *, struct inode *); + + +extern void d_move(struct dentry *, struct dentry *); +extern void d_exchange(struct dentry *, struct dentry *); +extern struct dentry *d_ancestor(struct dentry *, struct dentry *); + + +extern struct dentry *d_lookup(const struct dentry *, const struct qstr *); +extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); +extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *); +extern struct dentry *__d_lookup_rcu(const struct dentry *parent, + const struct qstr *name, unsigned *seq); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned d_count(const struct dentry *dentry) +{ + return dentry->d_lockref.count; +} + + + + +extern 
__attribute__((__format__(printf, 4, 5))) +char *dynamic_dname(struct dentry *, char *, int, const char *, ...); + +extern char *__d_path(const struct path *, const struct path *, char *, int); +extern char *d_absolute_path(const struct path *, char *, int); +extern char *d_path(const struct path *, char *, int); +extern char *dentry_path_raw(struct dentry *, char *, int); +extern char *dentry_path(struct dentry *, char *, int); +# 313 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dentry *dget_dlock(struct dentry *dentry) +{ + if (dentry) + dentry->d_lockref.count++; + return dentry; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dentry *dget(struct dentry *dentry) +{ + if (dentry) + lockref_get(&dentry->d_lockref); + return dentry; +} + +extern struct dentry *dget_parent(struct dentry *dentry); +# 336 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int d_unhashed(const struct dentry *dentry) +{ + return hlist_bl_unhashed(&dentry->d_hash); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int d_unlinked(const struct dentry *dentry) +{ + return d_unhashed(dentry) && !((dentry) == (dentry)->d_parent); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cant_mount(const struct dentry *dentry) +{ + return (dentry->d_flags & 0x00000100); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dont_mount(struct dentry *dentry) +{ + spin_lock(&dentry->d_lockref.lock); + dentry->d_flags |= 0x00000100; + spin_unlock(&dentry->d_lockref.lock); +} + +extern void __d_lookup_done(struct dentry *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int d_in_lookup(const struct dentry *dentry) +{ + return dentry->d_flags & 0x10000000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void d_lookup_done(struct dentry *dentry) +{ + if (__builtin_expect(!!(d_in_lookup(dentry)), 0)) { + spin_lock(&dentry->d_lockref.lock); + __d_lookup_done(dentry); + spin_unlock(&dentry->d_lockref.lock); + } +} + +extern void dput(struct dentry *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_managed(const struct dentry *dentry) +{ + return dentry->d_flags & (0x00010000|0x00020000|0x00040000); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_mountpoint(const struct dentry *dentry) +{ + return dentry->d_flags & 0x00010000; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned __d_entry_type(const struct dentry *dentry) +{ + return dentry->d_flags & 0x00700000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_miss(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == 0x00000000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) bool d_is_whiteout(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == 0x00100000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_can_lookup(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == 0x00200000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_autodir(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == 0x00300000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_dir(const struct dentry *dentry) +{ + return d_can_lookup(dentry) || d_is_autodir(dentry); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_symlink(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == 0x00600000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_reg(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == 0x00400000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_special(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == 0x00500000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_file(const struct dentry *dentry) +{ + return d_is_reg(dentry) || d_is_special(dentry); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_negative(const struct dentry *dentry) +{ + + return d_is_miss(dentry); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_flags_negative(unsigned flags) +{ + return (flags & 0x00700000) == 0x00000000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_positive(const struct dentry *dentry) +{ + return !d_is_negative(dentry); +} +# 470 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_really_is_negative(const struct dentry *dentry) +{ + return dentry->d_inode == ((void *)0); +} +# 488 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_really_is_positive(const struct dentry *dentry) +{ + return dentry->d_inode != ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int simple_positive(const struct dentry *dentry) +{ + return d_really_is_positive(dentry) && !d_unhashed(dentry); +} + +extern void d_set_fallthru(struct dentry *dentry); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool d_is_fallthru(const struct dentry *dentry) +{ + return dentry->d_flags & 0x01000000; +} + + +extern int sysctl_vfs_cache_pressure; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long vfs_pressure_ratio(unsigned long val) +{ + return ( { typeof(val) quot = (val) / (100); typeof(val) rem = 
(val) % (100); (quot * (sysctl_vfs_cache_pressure)) + ((rem * (sysctl_vfs_cache_pressure)) / (100)); } ); +} +# 520 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct inode *d_inode(const struct dentry *dentry) +{ + return dentry->d_inode; +} +# 532 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct inode *d_inode_rcu(const struct dentry *dentry) +{ + return ({ do { extern void __compiletime_assert_305(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(dentry->d_inode) == sizeof(char) || sizeof(dentry->d_inode) == sizeof(short) || sizeof(dentry->d_inode) == sizeof(int) || sizeof(dentry->d_inode) == sizeof(long)) || sizeof(dentry->d_inode) == sizeof(long long))) __compiletime_assert_305(); } while (0); ({ typeof( _Generic((dentry->d_inode), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dentry->d_inode))) __x = (*(const volatile typeof( _Generic((dentry->d_inode), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dentry->d_inode))) *)&(dentry->d_inode)); do { } while (0); (typeof(dentry->d_inode))__x; }); }); +} +# 547 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct inode *d_backing_inode(const struct dentry *upper) +{ + struct inode *inode = upper->d_inode; + + return inode; +} +# 564 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dentry *d_backing_dentry(struct dentry *upper) +{ + return upper; +} +# 579 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dentry *d_real(struct dentry *dentry, + const struct inode *inode) +{ + if (__builtin_expect(!!(dentry->d_flags & 0x04000000), 0)) + return dentry->d_op->d_real(dentry, inode); + else + return dentry; +} +# 595 "./include/linux/dcache.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct inode *d_real_inode(const struct dentry *dentry) +{ + + return d_backing_inode(d_real((struct dentry *) dentry, ((void *)0))); +} + +struct name_snapshot { + struct qstr name; + unsigned char inline_name[32]; +}; +void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); +void release_dentry_name_snapshot(struct name_snapshot *); +# 9 "./include/linux/fs.h" 2 +# 1 "./include/linux/path.h" 1 + + + + +struct dentry; +struct vfsmount; + +struct path { + struct vfsmount *mnt; + struct dentry *dentry; +} __attribute__((__designated_init__)); + +extern void path_get(const struct path *); +extern void path_put(const struct path *); + +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int path_equal(const struct path *path1, const struct path *path2) +{ + return path1->mnt == path2->mnt && path1->dentry == path2->dentry; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void path_put_init(struct path *path) +{ + path_put(path); + *path = (struct path) { }; +} +# 10 "./include/linux/fs.h" 2 +# 1 "./include/linux/stat.h" 1 + + + + + +# 1 "./arch/x86/include/uapi/asm/stat.h" 1 + + + + +# 1 "./arch/x86/include/asm/posix_types.h" 1 +# 6 "./arch/x86/include/uapi/asm/stat.h" 2 +# 83 "./arch/x86/include/uapi/asm/stat.h" +struct stat { + __kernel_ulong_t st_dev; + __kernel_ulong_t st_ino; + __kernel_ulong_t st_nlink; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad0; + __kernel_ulong_t st_rdev; + __kernel_long_t st_size; + __kernel_long_t st_blksize; + __kernel_long_t st_blocks; + + __kernel_ulong_t st_atime; + __kernel_ulong_t st_atime_nsec; + __kernel_ulong_t st_mtime; + __kernel_ulong_t st_mtime_nsec; + __kernel_ulong_t st_ctime; + __kernel_ulong_t st_ctime_nsec; + __kernel_long_t __unused[3]; +}; +# 117 "./arch/x86/include/uapi/asm/stat.h" +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + + + + + + + unsigned int st_size; + unsigned int st_atime; + unsigned int st_mtime; + unsigned int st_ctime; + +}; +# 7 "./include/linux/stat.h" 2 +# 1 "./include/uapi/linux/stat.h" 1 +# 56 "./include/uapi/linux/stat.h" +struct statx_timestamp { + __s64 tv_sec; + __u32 tv_nsec; + __s32 __reserved; +}; +# 99 "./include/uapi/linux/stat.h" +struct statx { + + __u32 stx_mask; + __u32 stx_blksize; + __u64 stx_attributes; + + __u32 stx_nlink; + __u32 stx_uid; + __u32 stx_gid; + __u16 stx_mode; + __u16 __spare0[1]; + + __u64 stx_ino; + __u64 stx_size; + __u64 stx_blocks; + __u64 stx_attributes_mask; + + struct statx_timestamp stx_atime; + struct statx_timestamp stx_btime; + struct statx_timestamp stx_ctime; + struct statx_timestamp stx_mtime; + + __u32 stx_rdev_major; + __u32 stx_rdev_minor; + __u32 stx_dev_major; + __u32 stx_dev_minor; + + __u64 stx_mnt_id; + __u64 __spare2; + + __u64 __spare3[12]; + +}; +# 8 "./include/linux/stat.h" 2 +# 19 "./include/linux/stat.h" +# 1 "./include/linux/time.h" 1 +# 10 "./include/linux/time.h" +extern struct timezone sys_tz; + +int get_timespec64(struct timespec64 *ts, + const struct __kernel_timespec *uts); +int put_timespec64(const struct timespec64 *ts, + struct __kernel_timespec *uts); +int get_itimerspec64(struct itimerspec64 *it, + const struct __kernel_itimerspec *uit); +int put_itimerspec64(const struct itimerspec64 *it, + struct __kernel_itimerspec *uit); + +extern time64_t mktime64(const unsigned int year, const unsigned int mon, + const unsigned int day, const unsigned int hour, + const unsigned int min, const unsigned int sec); +# 39 "./include/linux/time.h" +extern void clear_itimer(void); + + + + +extern long do_utimes(int dfd, const char *filename, struct timespec64 *times, int flags); + + + + + +struct tm { + + + + + int tm_sec; + + int tm_min; + + int tm_hour; + + int tm_mday; + + int tm_mon; + + long tm_year; + + int tm_wday; + + int tm_yday; +}; + +void time64_to_tm(time64_t totalsecs, int offset, struct tm *result); + +# 1 "./include/linux/time32.h" 1 +# 13 
"./include/linux/time32.h" +# 1 "./include/linux/timex.h" 1 +# 56 "./include/linux/timex.h" +# 1 "./include/uapi/linux/timex.h" 1 +# 56 "./include/uapi/linux/timex.h" +# 1 "./include/linux/time.h" 1 +# 57 "./include/uapi/linux/timex.h" 2 +# 97 "./include/uapi/linux/timex.h" +struct __kernel_timex_timeval { + __kernel_time64_t tv_sec; + long long tv_usec; +}; + +struct __kernel_timex { + unsigned int modes; + int :32; + long long offset; + long long freq; + long long maxerror; + long long esterror; + int status; + int :32; + long long constant; + long long precision; + long long tolerance; + + + struct __kernel_timex_timeval time; + long long tick; + + long long ppsfreq; + long long jitter; + int shift; + int :32; + long long stabil; + long long jitcnt; + long long calcnt; + long long errcnt; + long long stbcnt; + + int tai; + + int :32; int :32; int :32; int :32; + int :32; int :32; int :32; int :32; + int :32; int :32; int :32; +}; +# 57 "./include/linux/timex.h" 2 + + + + + + +# 1 "./include/uapi/linux/param.h" 1 + + + + +# 1 "./arch/x86/include/generated/uapi/asm/param.h" 1 +# 1 "./include/asm-generic/param.h" 1 + + + + +# 1 "./include/uapi/asm-generic/param.h" 1 +# 6 "./include/asm-generic/param.h" 2 +# 1 "./arch/x86/include/generated/uapi/asm/param.h" 2 +# 6 "./include/uapi/linux/param.h" 2 +# 64 "./include/linux/timex.h" 2 + +# 1 "./arch/x86/include/asm/timex.h" 1 + + + + + +# 1 "./arch/x86/include/asm/tsc.h" 1 +# 16 "./arch/x86/include/asm/tsc.h" +typedef unsigned long long cycles_t; + +extern unsigned int cpu_khz; +extern unsigned int tsc_khz; + +extern void disable_TSC(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) cycles_t get_cycles(void) +{ + + + + + + return rdtsc(); +} + +extern struct system_counterval_t convert_art_to_tsc(u64 art); +extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); + +extern void tsc_early_init(void); +extern void tsc_init(void); +extern unsigned long calibrate_delay_is_known(void); +extern void mark_tsc_unstable(char *reason); +extern int unsynchronized_tsc(void); +extern int check_tsc_unstable(void); +extern void mark_tsc_async_resets(char *reason); +extern unsigned long native_calibrate_cpu_early(void); +extern unsigned long native_calibrate_tsc(void); +extern unsigned long long native_sched_clock_from_tsc(u64 tsc); + +extern int tsc_clocksource_reliable; + +extern bool tsc_async_resets; +# 59 "./arch/x86/include/asm/tsc.h" +extern bool tsc_store_and_check_tsc_adjust(bool bootcpu); +extern void tsc_verify_tsc_adjust(bool resume); +extern void check_tsc_sync_source(int cpu); +extern void check_tsc_sync_target(void); + + + + + + + +extern int notsc_setup(char *); +extern void tsc_save_sched_clock_state(void); +extern void tsc_restore_sched_clock_state(void); + +unsigned long cpu_khz_from_msr(void); +# 7 "./arch/x86/include/asm/timex.h" 2 +# 66 "./include/linux/timex.h" 2 +# 139 "./include/linux/timex.h" +extern unsigned long tick_usec; +extern unsigned long tick_nsec; +# 154 "./include/linux/timex.h" +extern int do_adjtimex(struct __kernel_timex *); +extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx); + +extern void hardpps(const struct timespec64 *, const struct timespec64 *); + +int read_current_timer(unsigned long *timer_val); +void ntp_notify_cmos_timer(void); +# 14 "./include/linux/time32.h" 2 + +# 1 "./include/vdso/time32.h" 1 + + + + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 
tv_nsec; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; +# 16 "./include/linux/time32.h" 2 + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +struct old_utimbuf32 { + old_time32_t actime; + old_time32_t modtime; +}; + +struct old_timex32 { + u32 modes; + s32 offset; + s32 freq; + s32 maxerror; + s32 esterror; + s32 status; + s32 constant; + s32 precision; + s32 tolerance; + struct old_timeval32 time; + s32 tick; + s32 ppsfreq; + s32 jitter; + s32 shift; + s32 stabil; + s32 jitcnt; + s32 calcnt; + s32 errcnt; + s32 stbcnt; + s32 tai; + + s32:32; s32:32; s32:32; s32:32; + s32:32; s32:32; s32:32; s32:32; + s32:32; s32:32; s32:32; +}; + +extern int get_old_timespec32(struct timespec64 *, const void *); +extern int put_old_timespec32(const struct timespec64 *, void *); +extern int get_old_itimerspec32(struct itimerspec64 *its, + const struct old_itimerspec32 *uits); +extern int put_old_itimerspec32(const struct itimerspec64 *its, + struct old_itimerspec32 *uits); +struct __kernel_timex; +int get_old_timex32(struct __kernel_timex *, const struct old_timex32 *); +int put_old_timex32(struct old_timex32 *, const struct __kernel_timex *); + + + + + + + +extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec); +# 75 "./include/linux/time.h" 2 + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool itimerspec64_valid(const struct itimerspec64 *its) +{ + if (!timespec64_valid(&(its->it_interval)) || + !timespec64_valid(&(its->it_value))) + return false; + + return true; +} +# 114 "./include/linux/time.h" +# 1 "./include/vdso/time.h" 1 + + + + + + +struct timens_offset { + s64 sec; + u64 nsec; +}; +# 115 "./include/linux/time.h" 2 +# 20 "./include/linux/stat.h" 2 +# 1 "./include/linux/uidgid.h" 1 +# 16 "./include/linux/uidgid.h" +# 1 "./include/linux/highuid.h" 1 +# 35 "./include/linux/highuid.h" +extern int overflowuid; +extern int overflowgid; + +extern void __bad_uid(void); +extern void __bad_gid(void); +# 82 "./include/linux/highuid.h" +extern int fs_overflowuid; +extern int fs_overflowgid; +# 17 "./include/linux/uidgid.h" 2 + +struct user_namespace; +extern struct user_namespace init_user_ns; + +typedef struct { + uid_t val; +} kuid_t; + + +typedef struct { + gid_t val; +} kgid_t; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) uid_t __kuid_val(kuid_t uid) +{ + return uid.val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gid_t __kgid_val(kgid_t gid) +{ + return gid.val; +} +# 61 "./include/linux/uidgid.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_eq(kuid_t left, kuid_t right) +{ + return __kuid_val(left) == __kuid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_eq(kgid_t left, kgid_t right) +{ + return __kgid_val(left) == __kgid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_gt(kuid_t left, kuid_t right) +{ + return __kuid_val(left) > __kuid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_gt(kgid_t left, kgid_t right) +{ + return 
__kgid_val(left) > __kgid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_gte(kuid_t left, kuid_t right) +{ + return __kuid_val(left) >= __kuid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_gte(kgid_t left, kgid_t right) +{ + return __kgid_val(left) >= __kgid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_lt(kuid_t left, kuid_t right) +{ + return __kuid_val(left) < __kuid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_lt(kgid_t left, kgid_t right) +{ + return __kgid_val(left) < __kgid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_lte(kuid_t left, kuid_t right) +{ + return __kuid_val(left) <= __kuid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_lte(kgid_t left, kgid_t right) +{ + return __kgid_val(left) <= __kgid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uid_valid(kuid_t uid) +{ + return __kuid_val(uid) != (uid_t) -1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gid_valid(kgid_t gid) +{ + return __kgid_val(gid) != (gid_t) -1; +} + + + +extern kuid_t make_kuid(struct user_namespace *from, uid_t uid); +extern kgid_t make_kgid(struct user_namespace *from, gid_t gid); + +extern uid_t from_kuid(struct user_namespace *to, kuid_t uid); +extern gid_t from_kgid(struct user_namespace *to, kgid_t gid); +extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid); +extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid) +{ + return from_kuid(ns, uid) != (uid_t) -1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid) +{ + return from_kgid(ns, gid) != (gid_t) -1; +} +# 21 "./include/linux/stat.h" 2 + + + +struct kstat { + u32 result_mask; + umode_t mode; + unsigned int nlink; + uint32_t blksize; + u64 attributes; + u64 attributes_mask; +# 39 "./include/linux/stat.h" + u64 ino; + dev_t dev; + dev_t rdev; + kuid_t uid; + kgid_t gid; + loff_t size; + struct timespec64 atime; + struct timespec64 mtime; + struct timespec64 ctime; + struct timespec64 btime; + u64 blocks; + u64 mnt_id; +}; +# 11 "./include/linux/fs.h" 2 + + +# 1 "./include/linux/list_lru.h" 1 +# 12 "./include/linux/list_lru.h" +# 1 "./include/linux/nodemask.h" 1 +# 96 "./include/linux/nodemask.h" +# 1 "./include/linux/numa.h" 1 +# 25 "./include/linux/numa.h" +int numa_map_to_online_node(int node); + + + + + +int phys_to_target_node(phys_addr_t addr); +# 97 "./include/linux/nodemask.h" 2 + +typedef struct { unsigned long bits[((((1 << 10)) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; } nodemask_t; +extern nodemask_t _unused_nodemask_arg_; +# 109 "./include/linux/nodemask.h" +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __nodemask_pr_numnodes(const nodemask_t *m) +{ + return m ? (1 << 10) : 0; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const unsigned long *__nodemask_pr_bits(const nodemask_t *m) +{ + return m ? m->bits : ((void *)0); +} +# 128 "./include/linux/nodemask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __node_set(int node, volatile nodemask_t *dstp) +{ + set_bit(node, dstp->bits); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __node_clear(int node, volatile nodemask_t *dstp) +{ + clear_bit(node, dstp->bits); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_setall(nodemask_t *dstp, unsigned int nbits) +{ + bitmap_fill(dstp->bits, nbits); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_clear(nodemask_t *dstp, unsigned int nbits) +{ + bitmap_zero(dstp->bits, nbits); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __node_test_and_set(int node, nodemask_t *addr) +{ + return test_and_set_bit(node, addr->bits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_complement(nodemask_t *dstp, + const nodemask_t *srcp, unsigned int nbits) +{ + bitmap_complement(dstp->bits, srcp->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_equal(const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + return bitmap_equal(src1p->bits, src2p->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_intersects(const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + return bitmap_intersects(src1p->bits, src2p->bits, 
nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_subset(const nodemask_t *src1p, + const nodemask_t *src2p, unsigned int nbits) +{ + return bitmap_subset(src1p->bits, src2p->bits, nbits); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_empty(const nodemask_t *srcp, unsigned int nbits) +{ + return bitmap_empty(srcp->bits, nbits); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_full(const nodemask_t *srcp, unsigned int nbits) +{ + return bitmap_full(srcp->bits, nbits); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodes_weight(const nodemask_t *srcp, unsigned int nbits) +{ + return bitmap_weight(srcp->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_shift_right(nodemask_t *dstp, + const nodemask_t *srcp, int n, int nbits) +{ + bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_shift_left(nodemask_t *dstp, + const nodemask_t *srcp, int n, int nbits) +{ + bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __first_node(const nodemask_t *srcp) +{ + return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 10))) *)1 == (typeof((int)(find_first_bit(srcp->bits, (1 << 10)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 10))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_first_bit(srcp->bits, (1 << 10)))) * 0l)) : (int *)8))))), (((int)((1 << 10))) < ((int)(find_first_bit(srcp->bits, (1 << 10)))) ? ((int)((1 << 10))) : ((int)(find_first_bit(srcp->bits, (1 << 10))))), ({ typeof((int)((1 << 10))) __UNIQUE_ID___x306 = ((int)((1 << 10))); typeof((int)(find_first_bit(srcp->bits, (1 << 10)))) __UNIQUE_ID___y307 = ((int)(find_first_bit(srcp->bits, (1 << 10)))); ((__UNIQUE_ID___x306) < (__UNIQUE_ID___y307) ? (__UNIQUE_ID___x306) : (__UNIQUE_ID___y307)); })); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __next_node(int n, const nodemask_t *srcp) +{ + return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 10))) *)1 == (typeof((int)(find_next_bit(srcp->bits, (1 << 10), n+1))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 10))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_next_bit(srcp->bits, (1 << 10), n+1))) * 0l)) : (int *)8))))), (((int)((1 << 10))) < ((int)(find_next_bit(srcp->bits, (1 << 10), n+1))) ? ((int)((1 << 10))) : ((int)(find_next_bit(srcp->bits, (1 << 10), n+1)))), ({ typeof((int)((1 << 10))) __UNIQUE_ID___x308 = ((int)((1 << 10))); typeof((int)(find_next_bit(srcp->bits, (1 << 10), n+1))) __UNIQUE_ID___y309 = ((int)(find_next_bit(srcp->bits, (1 << 10), n+1))); ((__UNIQUE_ID___x308) < (__UNIQUE_ID___y309) ? 
(__UNIQUE_ID___x308) : (__UNIQUE_ID___y309)); })); +} + + + + + + +int __next_node_in(int node, const nodemask_t *srcp); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_nodemask_of_node(nodemask_t *mask, int node) +{ + __nodes_clear(&(*mask), (1 << 10)); + __node_set((node), &(*mask)); +} +# 299 "./include/linux/nodemask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __first_unset_node(const nodemask_t *maskp) +{ + return __builtin_choose_expr(((!!(sizeof((typeof((int)((1 << 10))) *)1 == (typeof((int)(find_first_zero_bit(maskp->bits, (1 << 10)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)((1 << 10))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(find_first_zero_bit(maskp->bits, (1 << 10)))) * 0l)) : (int *)8))))), (((int)((1 << 10))) < ((int)(find_first_zero_bit(maskp->bits, (1 << 10)))) ? ((int)((1 << 10))) : ((int)(find_first_zero_bit(maskp->bits, (1 << 10))))), ({ typeof((int)((1 << 10))) __UNIQUE_ID___x310 = ((int)((1 << 10))); typeof((int)(find_first_zero_bit(maskp->bits, (1 << 10)))) __UNIQUE_ID___y311 = ((int)(find_first_zero_bit(maskp->bits, (1 << 10)))); ((__UNIQUE_ID___x310) < (__UNIQUE_ID___y311) ? (__UNIQUE_ID___x310) : (__UNIQUE_ID___y311)); })) + ; +} +# 333 "./include/linux/nodemask.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodemask_parse_user(const char *buf, int len, + nodemask_t *dstp, int nbits) +{ + return bitmap_parse_user(buf, len, dstp->bits, nbits); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) +{ + return bitmap_parselist(buf, dstp->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __node_remap(int oldbit, + const nodemask_t *oldp, const nodemask_t *newp, int nbits) +{ + return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, + const nodemask_t *oldp, const nodemask_t *newp, int nbits) +{ + bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, + const nodemask_t *relmapp, int nbits) +{ + bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, + int sz, int nbits) +{ + bitmap_fold(dstp->bits, origp->bits, sz, nbits); +} +# 391 "./include/linux/nodemask.h" +enum node_states { + N_POSSIBLE, + N_ONLINE, + N_NORMAL_MEMORY, + + + + N_HIGH_MEMORY = N_NORMAL_MEMORY, + + N_MEMORY, + N_CPU, + NR_NODE_STATES +}; + + + + + + +extern nodemask_t node_states[NR_NODE_STATES]; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int node_state(int node, enum node_states state) +{ + return test_bit((node), (node_states[state]).bits); +} 
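+/*
+ * Annotation (not part of the original preprocessed source): the
+ * helpers below read and update the global node_states[] bitmaps.
+ * node_set_state()/node_clear_state() flip a single node's bit, and
+ * node_set_online()/node_set_offline() additionally refresh the cached
+ * nr_online_nodes counter from num_node_state(N_ONLINE).
+ */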
+ +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_set_state(int node, enum node_states state) +{ + __node_set(node, &node_states[state]); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_clear_state(int node, enum node_states state) +{ + __node_clear(node, &node_states[state]); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int num_node_state(enum node_states state) +{ + return __nodes_weight(&(node_states[state]), (1 << 10)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int next_online_node(int nid) +{ + return __next_node((nid), &(node_states[N_ONLINE])); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int next_memory_node(int nid) +{ + return __next_node((nid), &(node_states[N_MEMORY])); +} + +extern unsigned int nr_node_ids; +extern unsigned int nr_online_nodes; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_set_online(int nid) +{ + node_set_state(nid, N_ONLINE); + nr_online_nodes = num_node_state(N_ONLINE); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_set_offline(int nid) +{ + node_clear_state(nid, N_ONLINE); + nr_online_nodes = num_node_state(N_ONLINE); +} +# 497 "./include/linux/nodemask.h" +extern int node_random(const nodemask_t *maskp); +# 531 "./include/linux/nodemask.h" +struct nodemask_scratch { + nodemask_t mask1; + nodemask_t mask2; +}; +# 13 "./include/linux/list_lru.h" 2 +# 1 "./include/linux/shrinker.h" 1 +# 12 "./include/linux/shrinker.h" +struct shrink_control { + gfp_t gfp_mask; + + + int nid; + + + + + + + unsigned long nr_to_scan; + + + + + + + unsigned long nr_scanned; + + + struct mem_cgroup *memcg; +}; +# 60 "./include/linux/shrinker.h" +struct shrinker { + unsigned long (*count_objects)(struct shrinker *, + struct shrink_control *sc); + unsigned long (*scan_objects)(struct shrinker *, + struct shrink_control *sc); + + long batch; + int seeks; + unsigned flags; + + + struct list_head list; + + + int id; + + + atomic_long_t *nr_deferred; +}; +# 90 "./include/linux/shrinker.h" +extern int prealloc_shrinker(struct shrinker *shrinker); +extern void register_shrinker_prepared(struct shrinker *shrinker); +extern int register_shrinker(struct shrinker *shrinker); +extern void unregister_shrinker(struct shrinker *shrinker); +extern void free_prealloced_shrinker(struct shrinker *shrinker); +# 14 "./include/linux/list_lru.h" 2 + +struct mem_cgroup; + + +enum lru_status { + LRU_REMOVED, + LRU_REMOVED_RETRY, + + LRU_ROTATE, + LRU_SKIP, + LRU_RETRY, + +}; + +struct list_lru_one { + struct list_head list; + + long nr_items; +}; + +struct list_lru_memcg { + struct callback_head rcu; + + struct list_lru_one *lru[]; +}; + +struct list_lru_node { + + spinlock_t lock; + + struct list_lru_one lru; + + + struct list_lru_memcg *memcg_lrus; + + long nr_items; +} __attribute__((__aligned__((1 << (6))))); + +struct list_lru { + struct list_lru_node *node; + + struct list_head list; + int shrinker_id; + bool memcg_aware; + +}; + +void list_lru_destroy(struct list_lru *lru); +int __list_lru_init(struct list_lru *lru, bool memcg_aware, + struct 
lock_class_key *key, struct shrinker *shrinker); +# 72 "./include/linux/list_lru.h" +int memcg_update_all_list_lrus(int num_memcgs); +void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg); +# 91 "./include/linux/list_lru.h" +bool list_lru_add(struct list_lru *lru, struct list_head *item); +# 104 "./include/linux/list_lru.h" +bool list_lru_del(struct list_lru *lru, struct list_head *item); +# 116 "./include/linux/list_lru.h" +unsigned long list_lru_count_one(struct list_lru *lru, + int nid, struct mem_cgroup *memcg); +unsigned long list_lru_count_node(struct list_lru *lru, int nid); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long list_lru_shrink_count(struct list_lru *lru, + struct shrink_control *sc) +{ + return list_lru_count_one(lru, sc->nid, sc->memcg); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long list_lru_count(struct list_lru *lru) +{ + long count = 0; + int nid; + + for (((nid)) = __first_node(&(node_states[N_NORMAL_MEMORY])); ((nid)) < (1 << 10); ((nid)) = __next_node((((nid))), &((node_states[N_NORMAL_MEMORY])))) + count += list_lru_count_node(lru, nid); + + return count; +} + +void list_lru_isolate(struct list_lru_one *list, struct list_head *item); +void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, + struct list_head *head); + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, + struct list_lru_one *list, spinlock_t *lock, void *cb_arg); +# 166 "./include/linux/list_lru.h" +unsigned long list_lru_walk_one(struct list_lru *lru, + int nid, struct mem_cgroup *memcg, + list_lru_walk_cb isolate, void *cb_arg, + unsigned long *nr_to_walk); +# 183 "./include/linux/list_lru.h" +unsigned long list_lru_walk_one_irq(struct list_lru *lru, + int nid, struct mem_cgroup *memcg, + list_lru_walk_cb isolate, void *cb_arg, + unsigned long *nr_to_walk); +unsigned long list_lru_walk_node(struct list_lru *lru, int nid, + list_lru_walk_cb isolate, void *cb_arg, + unsigned long *nr_to_walk); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc, + list_lru_walk_cb isolate, void *cb_arg) +{ + return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, + &sc->nr_to_scan); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc, + list_lru_walk_cb isolate, void *cb_arg) +{ + return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, + &sc->nr_to_scan); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, + void *cb_arg, unsigned long nr_to_walk) +{ + long isolated = 0; + int nid; + + for (((nid)) = __first_node(&(node_states[N_NORMAL_MEMORY])); ((nid)) < (1 << 10); ((nid)) = __next_node((((nid))), &((node_states[N_NORMAL_MEMORY])))) { + isolated += list_lru_walk_node(lru, nid, isolate, + cb_arg, &nr_to_walk); + if (nr_to_walk <= 0) + break; + } + return isolated; +} +# 14 "./include/linux/fs.h" 2 +# 1 "./include/linux/llist.h" 1 +# 54 "./include/linux/llist.h" +struct llist_head { + struct llist_node *first; +}; + 
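+/*
+ * Annotation (not part of the original preprocessed source): llist is
+ * the kernel's lock-less NULL-terminated singly linked list. Additions
+ * go through llist_add_batch(), and llist_del_all() below detaches the
+ * whole list with a single atomic xchg of the head pointer, so
+ * llist_empty() only reports an instantaneous snapshot.
+ */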
+struct llist_node { + struct llist_node *next; +}; +# 69 "./include/linux/llist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_llist_head(struct llist_head *list) +{ + list->first = ((void *)0); +} +# 187 "./include/linux/llist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool llist_empty(const struct llist_head *head) +{ + return ({ do { extern void __compiletime_assert_312(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(head->first) == sizeof(char) || sizeof(head->first) == sizeof(short) || sizeof(head->first) == sizeof(int) || sizeof(head->first) == sizeof(long)) || sizeof(head->first) == sizeof(long long))) __compiletime_assert_312(); } while (0); ({ typeof( _Generic((head->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head->first))) __x = (*(const volatile typeof( _Generic((head->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head->first))) *)&(head->first)); do { } while (0); (typeof(head->first))__x; }); }) == ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct llist_node *llist_next(struct llist_node *node) +{ + return node->next; +} + +extern bool llist_add_batch(struct llist_node *new_first, + struct llist_node *new_last, + struct llist_head *head); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool llist_add(struct llist_node *new, struct llist_head *head) +{ + return llist_add_batch(new, new, head); +} +# 220 "./include/linux/llist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct llist_node *llist_del_all(struct llist_head *head) +{ + return ({ typeof(&head->first) __ai_ptr = (&head->first); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = ((((void *)0))); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); }); +} + +extern struct llist_node *llist_del_first(struct llist_head *head); + +struct llist_node *llist_reverse_order(struct llist_node *head); +# 15 "./include/linux/fs.h" 2 +# 1 "./include/linux/radix-tree.h" 1 +# 18 "./include/linux/radix-tree.h" +# 1 "./include/linux/xarray.h" 1 +# 14 
"./include/linux/xarray.h" +# 1 "./include/linux/gfp.h" 1 + + + + +# 1 "./include/linux/mmdebug.h" 1 + + + + + + + +struct page; +struct vm_area_struct; +struct mm_struct; + +extern void dump_page(struct page *page, const char *reason); +extern void __dump_page(struct page *page, const char *reason); +void dump_vma(const struct vm_area_struct *vma); +void dump_mm(const struct mm_struct *mm); +# 6 "./include/linux/gfp.h" 2 +# 1 "./include/linux/mmzone.h" 1 +# 18 "./include/linux/mmzone.h" +# 1 "./include/linux/pageblock-flags.h" 1 +# 18 "./include/linux/pageblock-flags.h" +enum pageblock_bits { + PB_migrate, + PB_migrate_end = PB_migrate + 3 - 1, + + PB_migrate_skip, + + + + + + NR_PAGEBLOCK_BITS +}; +# 55 "./include/linux/pageblock-flags.h" +struct page; + +unsigned long get_pfnblock_flags_mask(struct page *page, + unsigned long pfn, + unsigned long end_bitidx, + unsigned long mask); + +void set_pfnblock_flags_mask(struct page *page, + unsigned long flags, + unsigned long pfn, + unsigned long end_bitidx, + unsigned long mask); +# 19 "./include/linux/mmzone.h" 2 +# 1 "./include/linux/page-flags-layout.h" 1 +# 20 "./include/linux/mmzone.h" 2 + +# 1 "./include/linux/mm_types.h" 1 + + + + +# 1 "./include/linux/mm_types_task.h" 1 +# 19 "./include/linux/mm_types_task.h" +# 1 "./arch/x86/include/asm/tlbbatch.h" 1 + + + + + + +struct arch_tlbflush_unmap_batch { + + + + + struct cpumask cpumask; +}; +# 20 "./include/linux/mm_types_task.h" 2 +# 34 "./include/linux/mm_types_task.h" +struct vmacache { + u64 seqnum; + struct vm_area_struct *vmas[(1U << 2)]; +}; + + + + + +enum { + MM_FILEPAGES, + MM_ANONPAGES, + MM_SWAPENTS, + MM_SHMEMPAGES, + NR_MM_COUNTERS +}; + + + + +struct task_rss_stat { + int events; + int count[NR_MM_COUNTERS]; +}; + + +struct mm_rss_stat { + atomic_long_t count[NR_MM_COUNTERS]; +}; + +struct page_frag { + struct page *page; + + __u32 offset; + __u32 size; + + + + +}; + + +struct tlbflush_unmap_batch { +# 85 "./include/linux/mm_types_task.h" + struct arch_tlbflush_unmap_batch arch; + + + bool flush_required; + + + + + + + bool writable; + +}; +# 6 "./include/linux/mm_types.h" 2 + +# 1 "./include/linux/auxvec.h" 1 + + + + +# 1 "./include/uapi/linux/auxvec.h" 1 + + + + +# 1 "./arch/x86/include/uapi/asm/auxvec.h" 1 +# 6 "./include/uapi/linux/auxvec.h" 2 +# 6 "./include/linux/auxvec.h" 2 +# 8 "./include/linux/mm_types.h" 2 + + +# 1 "./include/linux/rbtree.h" 1 +# 24 "./include/linux/rbtree.h" +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + + +struct rb_root { + struct rb_node *rb_node; +}; +# 49 "./include/linux/rbtree.h" +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + + + +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root *); + + +extern struct rb_node *rb_first_postorder(const struct rb_root *); +extern struct rb_node *rb_next_postorder(const struct rb_node *); + + +extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); +extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rb_link_node(struct rb_node *node, struct 
rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = ((void *)0); + + *rb_link = node; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = ((void *)0); + + do { uintptr_t _r_a_p__v = (uintptr_t)(node); ; if (__builtin_constant_p(node) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_313(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((*rb_link)) == sizeof(char) || sizeof((*rb_link)) == sizeof(short) || sizeof((*rb_link)) == sizeof(int) || sizeof((*rb_link)) == sizeof(long)) || sizeof((*rb_link)) == sizeof(long long))) __compiletime_assert_313(); } while (0); do { *(volatile typeof((*rb_link)) *)&((*rb_link)) = ((typeof(*rb_link))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_314(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&*rb_link) == sizeof(char) || sizeof(*&*rb_link) == sizeof(short) || sizeof(*&*rb_link) == sizeof(int) || sizeof(*&*rb_link) == sizeof(long)))) __compiletime_assert_314(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_315(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&*rb_link) == sizeof(char) || sizeof(*&*rb_link) == sizeof(short) || sizeof(*&*rb_link) == sizeof(int) || sizeof(*&*rb_link) == sizeof(long)) || sizeof(*&*rb_link) == sizeof(long long))) __compiletime_assert_315(); } while (0); do { *(volatile typeof(*&*rb_link) *)&(*&*rb_link) = ((typeof(*((typeof(*rb_link))_r_a_p__v)) *)((typeof(*rb_link))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); +} +# 125 "./include/linux/rbtree.h" +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rb_insert_color_cached(struct rb_node *node, + struct rb_root_cached *root, + bool leftmost) +{ + if (leftmost) + root->rb_leftmost = node; + rb_insert_color(node, &root->rb_root); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rb_erase_cached(struct rb_node *node, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase(node, &root->rb_root); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rb_replace_node_cached(struct rb_node *victim, + struct rb_node *new, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == victim) + root->rb_leftmost = new; + rb_replace_node(victim, new, &root->rb_root); +} +# 11 "./include/linux/mm_types.h" 2 +# 1 "./include/linux/rwsem.h" 1 +# 20 "./include/linux/rwsem.h" +# 1 "./include/linux/osq_lock.h" 1 +# 9 "./include/linux/osq_lock.h" +struct optimistic_spin_node { + struct optimistic_spin_node *next, *prev; + int locked; + int cpu; +}; + +struct optimistic_spin_queue { + + + + + atomic_t tail; +}; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void osq_lock_init(struct optimistic_spin_queue *lock) +{ + atomic_set(&lock->tail, (0)); +} + +extern bool osq_lock(struct optimistic_spin_queue *lock); +extern void osq_unlock(struct optimistic_spin_queue *lock); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool osq_is_locked(struct optimistic_spin_queue *lock) +{ + return atomic_read(&lock->tail) != (0); +} +# 21 "./include/linux/rwsem.h" 2 +# 35 "./include/linux/rwsem.h" +struct rw_semaphore { + atomic_long_t count; + + + + + + atomic_long_t owner; + + struct optimistic_spin_queue osq; + + raw_spinlock_t wait_lock; + struct list_head wait_list; + + void *magic; + + + struct lockdep_map dep_map; + +}; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rwsem_is_locked(struct rw_semaphore *sem) +{ + return atomic_long_read(&sem->count) != 0; +} +# 101 "./include/linux/rwsem.h" +extern void __init_rwsem(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key); +# 117 "./include/linux/rwsem.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rwsem_is_contended(struct rw_semaphore *sem) +{ + return !list_empty(&sem->wait_list); +} + + + + +extern void down_read(struct rw_semaphore *sem); +extern int __attribute__((__warn_unused_result__)) down_read_killable(struct rw_semaphore *sem); + + + + +extern int down_read_trylock(struct rw_semaphore *sem); + + + + +extern void down_write(struct rw_semaphore *sem); +extern int __attribute__((__warn_unused_result__)) down_write_killable(struct rw_semaphore *sem); + + + + +extern int down_write_trylock(struct rw_semaphore *sem); + + + + +extern void up_read(struct rw_semaphore *sem); + + + + +extern void up_write(struct rw_semaphore *sem); + + + + +extern void downgrade_write(struct rw_semaphore *sem); +# 173 "./include/linux/rwsem.h" +extern void down_read_nested(struct rw_semaphore *sem, int subclass); +extern void down_write_nested(struct rw_semaphore *sem, int subclass); +extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); +extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); +# 190 "./include/linux/rwsem.h" +extern void down_read_non_owner(struct rw_semaphore *sem); +extern void up_read_non_owner(struct rw_semaphore *sem); +# 12 "./include/linux/mm_types.h" 2 +# 1 "./include/linux/completion.h" 1 +# 12 "./include/linux/completion.h" +# 1 "./include/linux/swait.h" 1 +# 41 "./include/linux/swait.h" +struct task_struct; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; +# 69 "./include/linux/swait.h" +extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name, + struct lock_class_key *key); +# 121 "./include/linux/swait.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int swait_active(struct swait_queue_head *wq) +{ + return !list_empty(&wq->task_list); +} +# 134 "./include/linux/swait.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool swq_has_sleeper(struct swait_queue_head *wq) +{ + + + + + + + + asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc"); + return swait_active(wq); +} + 
+extern void swake_up_one(struct swait_queue_head *q); +extern void swake_up_all(struct swait_queue_head *q); +extern void swake_up_locked(struct swait_queue_head *q); + +extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); +extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); + +extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); +extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); +# 13 "./include/linux/completion.h" 2 +# 26 "./include/linux/completion.h" +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void complete_acquire(struct completion *x) {} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void complete_release(struct completion *x) {} +# 85 "./include/linux/completion.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __init_completion(struct completion *x) +{ + x->done = 0; + do { static struct lock_class_key __key; __init_swait_queue_head((&x->wait), "&x->wait", &__key); } while (0); +} +# 98 "./include/linux/completion.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void reinit_completion(struct completion *x) +{ + x->done = 0; +} + +extern void wait_for_completion(struct completion *); +extern void wait_for_completion_io(struct completion *); +extern int wait_for_completion_interruptible(struct completion *x); +extern int wait_for_completion_killable(struct completion *x); +extern unsigned long wait_for_completion_timeout(struct completion *x, + unsigned long timeout); +extern unsigned long wait_for_completion_io_timeout(struct completion *x, + unsigned long timeout); +extern long wait_for_completion_interruptible_timeout( + struct completion *x, unsigned long timeout); +extern long wait_for_completion_killable_timeout( + struct completion *x, unsigned long timeout); +extern bool try_wait_for_completion(struct completion *x); +extern bool completion_done(struct completion *x); + +extern void complete(struct completion *); +extern void complete_all(struct completion *); +# 13 "./include/linux/mm_types.h" 2 + +# 1 "./include/linux/uprobes.h" 1 +# 19 "./include/linux/uprobes.h" +struct vm_area_struct; +struct mm_struct; +struct inode; +struct notifier_block; +struct page; + + + + + + +enum uprobe_filter_ctx { + UPROBE_FILTER_REGISTER, + UPROBE_FILTER_UNREGISTER, + UPROBE_FILTER_MMAP, +}; + +struct uprobe_consumer { + int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); + int (*ret_handler)(struct uprobe_consumer *self, + unsigned long func, + struct pt_regs *regs); + bool (*filter)(struct uprobe_consumer *self, + enum uprobe_filter_ctx ctx, + struct mm_struct *mm); + + struct uprobe_consumer *next; +}; + + +# 1 "./arch/x86/include/asm/uprobes.h" 1 +# 13 "./arch/x86/include/asm/uprobes.h" +# 1 "./include/linux/notifier.h" 1 +# 14 "./include/linux/notifier.h" +# 1 "./include/linux/mutex.h" 1 +# 23 "./include/linux/mutex.h" +struct ww_acquire_ctx; +# 53 "./include/linux/mutex.h" +struct mutex { + atomic_long_t owner; + spinlock_t wait_lock; + + struct optimistic_spin_queue osq; + + struct list_head wait_list; + + void *magic; + + + struct lockdep_map dep_map; 
+ +}; + + + + + +struct mutex_waiter { + struct list_head list; + struct task_struct *task; + struct ww_acquire_ctx *ww_ctx; + + void *magic; + +}; + + + + + + +extern void mutex_destroy(struct mutex *lock); +# 131 "./include/linux/mutex.h" +extern void __mutex_init(struct mutex *lock, const char *name, + struct lock_class_key *key); + + + + + + + +extern bool mutex_is_locked(struct mutex *lock); + + + + + + +extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); +extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); + +extern int __attribute__((__warn_unused_result__)) mutex_lock_interruptible_nested(struct mutex *lock, + unsigned int subclass); +extern int __attribute__((__warn_unused_result__)) mutex_lock_killable_nested(struct mutex *lock, + unsigned int subclass); +extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass); +# 186 "./include/linux/mutex.h" +extern int mutex_trylock(struct mutex *lock); +extern void mutex_unlock(struct mutex *lock); + +extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + + + + + +enum mutex_trylock_recursive_enum { + MUTEX_TRYLOCK_FAILED = 0, + MUTEX_TRYLOCK_SUCCESS = 1, + MUTEX_TRYLOCK_RECURSIVE, +}; +# 213 "./include/linux/mutex.h" +extern __attribute__((__warn_unused_result__)) enum mutex_trylock_recursive_enum +mutex_trylock_recursive(struct mutex *lock); +# 15 "./include/linux/notifier.h" 2 + +# 1 "./include/linux/srcu.h" 1 +# 21 "./include/linux/srcu.h" +# 1 "./include/linux/workqueue.h" 1 +# 9 "./include/linux/workqueue.h" +# 1 "./include/linux/timer.h" 1 + + + + + +# 1 "./include/linux/ktime.h" 1 +# 25 "./include/linux/ktime.h" +# 1 "./include/linux/jiffies.h" 1 +# 11 "./include/linux/jiffies.h" +# 1 "./include/vdso/jiffies.h" 1 + + + + +# 1 "./arch/x86/include/generated/uapi/asm/param.h" 1 +# 6 "./include/vdso/jiffies.h" 2 +# 12 "./include/linux/jiffies.h" 2 +# 1 "./arch/x86/include/generated/uapi/asm/param.h" 1 +# 13 "./include/linux/jiffies.h" 2 +# 1 "./include/generated/timeconst.h" 1 +# 14 "./include/linux/jiffies.h" 2 +# 61 "./include/linux/jiffies.h" +extern int register_refined_jiffies(long clock_tick_rate); +# 78 "./include/linux/jiffies.h" +extern u64 __attribute__((__aligned__((1 << 12)))) __attribute__((__section__(".data..page_aligned"))) __attribute__((__aligned__(((1UL) << 12)))) jiffies_64; +extern unsigned long volatile __attribute__((__aligned__((1 << 12)))) __attribute__((__section__(".data..page_aligned"))) __attribute__((__aligned__(((1UL) << 12)))) jiffies; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 get_jiffies_64(void) +{ + return (u64)jiffies; +} +# 188 "./include/linux/jiffies.h" +extern unsigned long preset_lpj; +# 289 "./include/linux/jiffies.h" +extern unsigned int jiffies_to_msecs(const unsigned long j); +extern unsigned int jiffies_to_usecs(const unsigned long j); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 jiffies_to_nsecs(const unsigned long j) +{ + return (u64)jiffies_to_usecs(j) * 1000L; +} + +extern u64 jiffies64_to_nsecs(u64 j); +extern u64 jiffies64_to_msecs(u64 j); + +extern unsigned long __msecs_to_jiffies(const unsigned int m); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long _msecs_to_jiffies(const unsigned int m) +{ + return (m + (1000L / 250) - 1) / (1000L / 250); 
+} +# 362 "./include/linux/jiffies.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long msecs_to_jiffies(const unsigned int m) +{ + if (__builtin_constant_p(m)) { + if ((int)m < 0) + return ((((long)(~0UL >> 1)) >> 1)-1); + return _msecs_to_jiffies(m); + } else { + return __msecs_to_jiffies(m); + } +} + +extern unsigned long __usecs_to_jiffies(const unsigned int u); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long _usecs_to_jiffies(const unsigned int u) +{ + return (u + (1000000L / 250) - 1) / (1000000L / 250); +} +# 409 "./include/linux/jiffies.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long usecs_to_jiffies(const unsigned int u) +{ + if (__builtin_constant_p(u)) { + if (u > jiffies_to_usecs(((((long)(~0UL >> 1)) >> 1)-1))) + return ((((long)(~0UL >> 1)) >> 1)-1); + return _usecs_to_jiffies(u); + } else { + return __usecs_to_jiffies(u); + } +} + +extern unsigned long timespec64_to_jiffies(const struct timespec64 *value); +extern void jiffies_to_timespec64(const unsigned long jiffies, + struct timespec64 *value); +extern clock_t jiffies_to_clock_t(unsigned long x); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) clock_t jiffies_delta_to_clock_t(long delta) +{ + return jiffies_to_clock_t(__builtin_choose_expr(((!!(sizeof((typeof(0L) *)1 == (typeof(delta) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(0L) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(delta) * 0l)) : (int *)8))))), ((0L) > (delta) ? (0L) : (delta)), ({ typeof(0L) __UNIQUE_ID___x316 = (0L); typeof(delta) __UNIQUE_ID___y317 = (delta); ((__UNIQUE_ID___x316) > (__UNIQUE_ID___y317) ? (__UNIQUE_ID___x316) : (__UNIQUE_ID___y317)); }))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int jiffies_delta_to_msecs(long delta) +{ + return jiffies_to_msecs(__builtin_choose_expr(((!!(sizeof((typeof(0L) *)1 == (typeof(delta) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(0L) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(delta) * 0l)) : (int *)8))))), ((0L) > (delta) ? (0L) : (delta)), ({ typeof(0L) __UNIQUE_ID___x318 = (0L); typeof(delta) __UNIQUE_ID___y319 = (delta); ((__UNIQUE_ID___x318) > (__UNIQUE_ID___y319) ? 
(__UNIQUE_ID___x318) : (__UNIQUE_ID___y319)); }))); +} + +extern unsigned long clock_t_to_jiffies(unsigned long x); +extern u64 jiffies_64_to_clock_t(u64 x); +extern u64 nsec_to_clock_t(u64 x); +extern u64 nsecs_to_jiffies64(u64 n); +extern unsigned long nsecs_to_jiffies(u64 n); +# 26 "./include/linux/ktime.h" 2 + + +typedef s64 ktime_t; +# 37 "./include/linux/ktime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_set(const s64 secs, const unsigned long nsecs) +{ + if (__builtin_expect(!!(secs >= (((s64)~((u64)1 << 63)) / 1000000000L)), 0)) + return ((s64)~((u64)1 << 63)); + + return secs * 1000000000L + (s64)nsecs; +} +# 70 "./include/linux/ktime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t timespec64_to_ktime(struct timespec64 ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_to_ns(const ktime_t kt) +{ + return kt; +} +# 94 "./include/linux/ktime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) +{ + if (cmp1 < cmp2) + return -1; + if (cmp1 > cmp2) + return 1; + return 0; +} +# 110 "./include/linux/ktime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ktime_after(const ktime_t cmp1, const ktime_t cmp2) +{ + return ktime_compare(cmp1, cmp2) > 0; +} +# 122 "./include/linux/ktime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) +{ + return ktime_compare(cmp1, cmp2) < 0; +} +# 147 "./include/linux/ktime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_divns(const ktime_t kt, s64 div) +{ + + + + + ({ int __ret_warn_on = !!(div < 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (320)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/ktime.h"), "i" (153), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (321)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (322)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + return kt / div; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_to_us(const ktime_t kt) +{ + return ktime_divns(kt, 1000L); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_to_ms(const ktime_t kt) +{ + return ktime_divns(kt, 1000000L); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) +{ + return ktime_to_us(((later) - (earlier))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier) +{ + return ktime_to_ms(((later) - (earlier))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec) +{ + return ((kt) + (usec * 1000L)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_add_ms(const ktime_t kt, const u64 msec) +{ + return ((kt) + (msec * 1000000L)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) +{ + return ((kt) - (usec * 1000L)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec) +{ + return ((kt) - (msec * 1000000L)); +} + +extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); +# 208 "./include/linux/ktime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) bool ktime_to_timespec64_cond(const ktime_t kt, + struct timespec64 *ts) +{ + if (kt) { + *ts = ns_to_timespec64((kt)); + return true; + } else { + return false; + } +} + +# 1 "./include/vdso/ktime.h" 1 +# 220 "./include/linux/ktime.h" 2 + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ns_to_ktime(u64 ns) +{ + return ns; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ms_to_ktime(u64 ms) +{ + return ms * 1000000L; +} + +# 1 "./include/linux/timekeeping.h" 1 +# 9 "./include/linux/timekeeping.h" +void timekeeping_init(void); +extern int timekeeping_suspended; + + +extern void update_process_times(int user); +extern void xtime_update(unsigned long ticks); + + + + +extern int do_settimeofday64(const struct timespec64 *ts); +extern int do_sys_settimeofday64(const struct timespec64 *tv, + const struct timezone *tz); +# 41 "./include/linux/timekeeping.h" +extern void ktime_get_raw_ts64(struct timespec64 *ts); +extern void ktime_get_ts64(struct timespec64 *ts); +extern void ktime_get_real_ts64(struct timespec64 *tv); +extern void ktime_get_coarse_ts64(struct timespec64 *ts); +extern void ktime_get_coarse_real_ts64(struct timespec64 *ts); + +void getboottime64(struct timespec64 *ts); + + + + +extern time64_t ktime_get_seconds(void); +extern time64_t __ktime_get_real_seconds(void); +extern time64_t ktime_get_real_seconds(void); + + + + + +enum tk_offsets { + TK_OFFS_REAL, + TK_OFFS_BOOT, + TK_OFFS_TAI, + TK_OFFS_MAX, +}; + +extern ktime_t ktime_get(void); +extern ktime_t ktime_get_with_offset(enum tk_offsets offs); +extern ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs); +extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); +extern ktime_t ktime_get_raw(void); +extern u32 ktime_get_resolution_ns(void); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t 
ktime_get_real(void) +{ + return ktime_get_with_offset(TK_OFFS_REAL); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_coarse_real(void) +{ + return ktime_get_coarse_with_offset(TK_OFFS_REAL); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_boottime(void) +{ + return ktime_get_with_offset(TK_OFFS_BOOT); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_coarse_boottime(void) +{ + return ktime_get_coarse_with_offset(TK_OFFS_BOOT); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_clocktai(void) +{ + return ktime_get_with_offset(TK_OFFS_TAI); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_coarse_clocktai(void) +{ + return ktime_get_coarse_with_offset(TK_OFFS_TAI); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_get_coarse(void) +{ + struct timespec64 ts; + + ktime_get_coarse_ts64(&ts); + return timespec64_to_ktime(ts); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_coarse_ns(void) +{ + return ktime_to_ns(ktime_get_coarse()); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_coarse_real_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_real()); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_coarse_boottime_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_boottime()); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_coarse_clocktai_ns(void) +{ + return ktime_to_ns(ktime_get_coarse_clocktai()); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t ktime_mono_to_real(ktime_t mono) +{ + return ktime_mono_to_any(mono, TK_OFFS_REAL); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_boottime_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_clocktai_ns(void) +{ + return ktime_to_ns(ktime_get_clocktai()); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ktime_get_raw_ns(void) +{ + return ktime_to_ns(ktime_get_raw()); +} + +extern u64 ktime_get_mono_fast_ns(void); +extern u64 ktime_get_raw_fast_ns(void); +extern u64 ktime_get_boot_fast_ns(void); +extern u64 ktime_get_real_fast_ns(void); + 
+ + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_boottime_ts64(struct timespec64 *ts) +{ + *ts = ns_to_timespec64((ktime_get_boottime())); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_coarse_boottime_ts64(struct timespec64 *ts) +{ + *ts = ns_to_timespec64((ktime_get_coarse_boottime())); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) time64_t ktime_get_boottime_seconds(void) +{ + return ktime_divns(ktime_get_coarse_boottime(), 1000000000L); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_clocktai_ts64(struct timespec64 *ts) +{ + *ts = ns_to_timespec64((ktime_get_clocktai())); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ktime_get_coarse_clocktai_ts64(struct timespec64 *ts) +{ + *ts = ns_to_timespec64((ktime_get_coarse_clocktai())); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) time64_t ktime_get_clocktai_seconds(void) +{ + return ktime_divns(ktime_get_coarse_clocktai(), 1000000000L); +} + + + + +extern bool timekeeping_rtc_skipsuspend(void); +extern bool timekeeping_rtc_skipresume(void); + +extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); +# 234 "./include/linux/timekeeping.h" +struct system_time_snapshot { + u64 cycles; + ktime_t real; + ktime_t raw; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; +}; +# 249 "./include/linux/timekeeping.h" +struct system_device_crosststamp { + ktime_t device; + ktime_t sys_realtime; + ktime_t sys_monoraw; +}; +# 262 "./include/linux/timekeeping.h" +struct system_counterval_t { + u64 cycles; + struct clocksource *cs; +}; + + + + +extern int get_device_system_crosststamp( + int (*get_time_fn)(ktime_t *device_time, + struct system_counterval_t *system_counterval, + void *ctx), + void *ctx, + struct system_time_snapshot *history, + struct system_device_crosststamp *xtstamp); + + + + +extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); + + + + +extern int persistent_clock_is_local; + +extern void read_persistent_clock64(struct timespec64 *ts); +void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock, + struct timespec64 *boot_offset); +extern int update_persistent_clock64(struct timespec64 now); +# 232 "./include/linux/ktime.h" 2 +# 1 "./include/linux/timekeeping32.h" 1 +# 9 "./include/linux/timekeeping32.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_seconds(void) +{ + return ktime_get_real_seconds(); +} +# 233 "./include/linux/ktime.h" 2 +# 7 "./include/linux/timer.h" 2 + +# 1 "./include/linux/debugobjects.h" 1 + + + + + + + +enum debug_obj_state { + ODEBUG_STATE_NONE, + ODEBUG_STATE_INIT, + ODEBUG_STATE_INACTIVE, + ODEBUG_STATE_ACTIVE, + ODEBUG_STATE_DESTROYED, + ODEBUG_STATE_NOTAVAILABLE, + ODEBUG_STATE_MAX, +}; + +struct debug_obj_descr; +# 28 "./include/linux/debugobjects.h" +struct debug_obj { + struct hlist_node node; + enum debug_obj_state state; + unsigned int astate; + void *object; + struct debug_obj_descr *descr; +}; +# 55 "./include/linux/debugobjects.h" +struct debug_obj_descr { + const char 
*name; + void *(*debug_hint)(void *addr); + bool (*is_static_object)(void *addr); + bool (*fixup_init)(void *addr, enum debug_obj_state state); + bool (*fixup_activate)(void *addr, enum debug_obj_state state); + bool (*fixup_destroy)(void *addr, enum debug_obj_state state); + bool (*fixup_free)(void *addr, enum debug_obj_state state); + bool (*fixup_assert_init)(void *addr, enum debug_obj_state state); +}; + + +extern void debug_object_init (void *addr, struct debug_obj_descr *descr); +extern void +debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr); +extern int debug_object_activate (void *addr, struct debug_obj_descr *descr); +extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); +extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); +extern void debug_object_free (void *addr, struct debug_obj_descr *descr); +extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr); + + + + + + +extern void +debug_object_active_state(void *addr, struct debug_obj_descr *descr, + unsigned int expect, unsigned int next); + +extern void debug_objects_early_init(void); +extern void debug_objects_mem_init(void); +# 108 "./include/linux/debugobjects.h" +extern void debug_check_no_obj_freed(const void *address, unsigned long size); +# 9 "./include/linux/timer.h" 2 + + +struct timer_list { + + + + + struct hlist_node entry; + unsigned long expires; + void (*function)(struct timer_list *); + u32 flags; + + + struct lockdep_map lockdep_map; + +}; +# 90 "./include/linux/timer.h" +void init_timer_key(struct timer_list *timer, + void (*func)(struct timer_list *), unsigned int flags, + const char *name, struct lock_class_key *key); + + +extern void init_timer_on_stack_key(struct timer_list *timer, + void (*func)(struct timer_list *), + unsigned int flags, const char *name, + struct lock_class_key *key); +# 147 "./include/linux/timer.h" +extern void destroy_timer_on_stack(struct timer_list *timer); +# 165 "./include/linux/timer.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int timer_pending(const struct timer_list * timer) +{ + return !hlist_unhashed_lockless(&timer->entry); +} + +extern void add_timer_on(struct timer_list *timer, int cpu); +extern int del_timer(struct timer_list * timer); +extern int mod_timer(struct timer_list *timer, unsigned long expires); +extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); +extern int timer_reduce(struct timer_list *timer, unsigned long expires); + + + + + + + +extern void add_timer(struct timer_list *timer); + +extern int try_to_del_timer_sync(struct timer_list *timer); + + + extern int del_timer_sync(struct timer_list *timer); + + + + + + +extern void init_timers(void); +extern void run_local_timers(void); +struct hrtimer; +extern enum hrtimer_restart it_real_fn(struct hrtimer *); + + +struct ctl_table; + +extern unsigned int sysctl_timer_migration; +int timer_migration_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); + + +unsigned long __round_jiffies(unsigned long j, int cpu); +unsigned long __round_jiffies_relative(unsigned long j, int cpu); +unsigned long round_jiffies(unsigned long j); +unsigned long round_jiffies_relative(unsigned long j); + +unsigned long __round_jiffies_up(unsigned long j, int cpu); +unsigned long __round_jiffies_up_relative(unsigned long j, int cpu); +unsigned long round_jiffies_up(unsigned long j); +unsigned long 
round_jiffies_up_relative(unsigned long j); + + +int timers_prepare_cpu(unsigned int cpu); +int timers_dead_cpu(unsigned int cpu); +# 10 "./include/linux/workqueue.h" 2 +# 18 "./include/linux/workqueue.h" +struct workqueue_struct; + +struct work_struct; +typedef void (*work_func_t)(struct work_struct *work); +void delayed_work_timer_fn(struct timer_list *t); + + + + + + + +enum { + WORK_STRUCT_PENDING_BIT = 0, + WORK_STRUCT_DELAYED_BIT = 1, + WORK_STRUCT_PWQ_BIT = 2, + WORK_STRUCT_LINKED_BIT = 3, + + WORK_STRUCT_STATIC_BIT = 4, + WORK_STRUCT_COLOR_SHIFT = 5, + + + + + WORK_STRUCT_COLOR_BITS = 4, + + WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, + WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, + WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, + WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, + + WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, +# 58 "./include/linux/workqueue.h" + WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, + WORK_NO_COLOR = WORK_NR_COLORS, + + + WORK_CPU_UNBOUND = 8192, + + + + + + + WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + + WORK_STRUCT_COLOR_BITS, + + + WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, + + __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, + WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), + + + + + + + WORK_OFFQ_FLAG_BITS = 1, + WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, + WORK_OFFQ_LEFT = 64 - WORK_OFFQ_POOL_SHIFT, + WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, + WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1, + + + WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, + WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, + WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT, + + + WORK_BUSY_PENDING = 1 << 0, + WORK_BUSY_RUNNING = 1 << 1, + + + WORKER_DESC_LEN = 24, +}; + +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; + + struct lockdep_map lockdep_map; + +}; + + + + + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + + + struct workqueue_struct *wq; + int cpu; +}; + +struct rcu_work { + struct work_struct work; + struct callback_head rcu; + + + struct workqueue_struct *wq; +}; + + + + + + +struct workqueue_attrs { + + + + int nice; + + + + + cpumask_var_t cpumask; +# 155 "./include/linux/workqueue.h" + bool no_numa; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct delayed_work *to_delayed_work(struct work_struct *work) +{ + return ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_323(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct delayed_work *)0)->work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_323(); } while (0); ((struct delayed_work *)(__mptr - __builtin_offsetof(struct delayed_work, work))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rcu_work *to_rcu_work(struct work_struct *work) +{ + return ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_324(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct rcu_work *)0)->work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_324(); } while (0); 
((struct rcu_work *)(__mptr - __builtin_offsetof(struct rcu_work, work))); }); +} + +struct execute_work { + struct work_struct work; +}; +# 207 "./include/linux/workqueue.h" +extern void __init_work(struct work_struct *work, int onstack); +extern void destroy_work_on_stack(struct work_struct *work); +extern void destroy_delayed_work_on_stack(struct delayed_work *work); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int work_static(struct work_struct *work) +{ + return *((unsigned long *)(&(work)->data)) & WORK_STRUCT_STATIC; +} +# 308 "./include/linux/workqueue.h" +enum { + WQ_UNBOUND = 1 << 1, + WQ_FREEZABLE = 1 << 2, + WQ_MEM_RECLAIM = 1 << 3, + WQ_HIGHPRI = 1 << 4, + WQ_CPU_INTENSIVE = 1 << 5, + WQ_SYSFS = 1 << 6, +# 341 "./include/linux/workqueue.h" + WQ_POWER_EFFICIENT = 1 << 7, + + __WQ_DRAINING = 1 << 16, + __WQ_ORDERED = 1 << 17, + __WQ_LEGACY = 1 << 18, + __WQ_ORDERED_EXPLICIT = 1 << 19, + + WQ_MAX_ACTIVE = 512, + WQ_MAX_UNBOUND_PER_CPU = 4, + WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, +}; +# 385 "./include/linux/workqueue.h" +extern struct workqueue_struct *system_wq; +extern struct workqueue_struct *system_highpri_wq; +extern struct workqueue_struct *system_long_wq; +extern struct workqueue_struct *system_unbound_wq; +extern struct workqueue_struct *system_freezable_wq; +extern struct workqueue_struct *system_power_efficient_wq; +extern struct workqueue_struct *system_freezable_power_efficient_wq; +# 407 "./include/linux/workqueue.h" +struct workqueue_struct *alloc_workqueue(const char *fmt, + unsigned int flags, + int max_active, ...); +# 436 "./include/linux/workqueue.h" +extern void destroy_workqueue(struct workqueue_struct *wq); + +struct workqueue_attrs *alloc_workqueue_attrs(void); +void free_workqueue_attrs(struct workqueue_attrs *attrs); +int apply_workqueue_attrs(struct workqueue_struct *wq, + const struct workqueue_attrs *attrs); +int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); + +extern bool queue_work_on(int cpu, struct workqueue_struct *wq, + struct work_struct *work); +extern bool queue_work_node(int node, struct workqueue_struct *wq, + struct work_struct *work); +extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, + struct delayed_work *work, unsigned long delay); +extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, + struct delayed_work *dwork, unsigned long delay); +extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); + +extern void flush_workqueue(struct workqueue_struct *wq); +extern void drain_workqueue(struct workqueue_struct *wq); + +extern int schedule_on_each_cpu(work_func_t func); + +int execute_in_process_context(work_func_t fn, struct execute_work *); + +extern bool flush_work(struct work_struct *work); +extern bool cancel_work_sync(struct work_struct *work); + +extern bool flush_delayed_work(struct delayed_work *dwork); +extern bool cancel_delayed_work(struct delayed_work *dwork); +extern bool cancel_delayed_work_sync(struct delayed_work *dwork); + +extern bool flush_rcu_work(struct rcu_work *rwork); + +extern void workqueue_set_max_active(struct workqueue_struct *wq, + int max_active); +extern struct work_struct *current_work(void); +extern bool current_is_workqueue_rescuer(void); +extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); +extern unsigned int work_busy(struct work_struct *work); +extern __attribute__((__format__(printf, 1, 2))) void set_worker_desc(const char *fmt, ...); 
+extern void print_worker_info(const char *log_lvl, struct task_struct *task); +extern void show_workqueue_state(void); +extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); +# 504 "./include/linux/workqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool queue_work(struct workqueue_struct *wq, + struct work_struct *work) +{ + return queue_work_on(WORK_CPU_UNBOUND, wq, work); +} +# 518 "./include/linux/workqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool queue_delayed_work(struct workqueue_struct *wq, + struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); +} +# 533 "./include/linux/workqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mod_delayed_work(struct workqueue_struct *wq, + struct delayed_work *dwork, + unsigned long delay) +{ + return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); +} +# 547 "./include/linux/workqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool schedule_work_on(int cpu, struct work_struct *work) +{ + return queue_work_on(cpu, system_wq, work); +} +# 566 "./include/linux/workqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool schedule_work(struct work_struct *work) +{ + return queue_work(system_wq, work); +} +# 595 "./include/linux/workqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_scheduled_work(void) +{ + flush_workqueue(system_wq); +} +# 609 "./include/linux/workqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work_on(cpu, system_wq, dwork, delay); +} +# 623 "./include/linux/workqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool schedule_delayed_work(struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work(system_wq, dwork, delay); +} +# 639 "./include/linux/workqueue.h" +long work_on_cpu(int cpu, long (*fn)(void *), void *arg); +long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg); + + + +extern void freeze_workqueues_begin(void); +extern bool freeze_workqueues_busy(void); +extern void thaw_workqueues(void); + + + +int workqueue_sysfs_register(struct workqueue_struct *wq); + + + + + + +void wq_watchdog_touch(int cpu); + + + + + +int workqueue_prepare_cpu(unsigned int cpu); +int workqueue_online_cpu(unsigned int cpu); +int workqueue_offline_cpu(unsigned int cpu); + + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) workqueue_init_early(void); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) workqueue_init(void); +# 22 "./include/linux/srcu.h" 2 +# 1 "./include/linux/rcu_segcblist.h" 1 +# 21 "./include/linux/rcu_segcblist.h" +struct rcu_cblist { + struct callback_head *head; + struct callback_head **tail; + long len; +}; +# 66 
"./include/linux/rcu_segcblist.h" +struct rcu_segcblist { + struct callback_head *head; + struct callback_head **tails[4]; + unsigned long gp_seq[4]; + + atomic_long_t len; + + + + u8 enabled; + u8 offloaded; +}; +# 23 "./include/linux/srcu.h" 2 + +struct srcu_struct; + + + +int __init_srcu_struct(struct srcu_struct *ssp, const char *name, + struct lock_class_key *key); +# 49 "./include/linux/srcu.h" +# 1 "./include/linux/srcutree.h" 1 +# 14 "./include/linux/srcutree.h" +# 1 "./include/linux/rcu_node_tree.h" 1 +# 15 "./include/linux/srcutree.h" 2 + + +struct srcu_node; +struct srcu_struct; + + + + + +struct srcu_data { + + unsigned long srcu_lock_count[2]; + unsigned long srcu_unlock_count[2]; + + + spinlock_t lock __attribute__((__aligned__(1 << (12)))); + struct rcu_segcblist srcu_cblist; + unsigned long srcu_gp_seq_needed; + unsigned long srcu_gp_seq_needed_exp; + bool srcu_cblist_invoking; + struct timer_list delay_work; + struct work_struct work; + struct callback_head srcu_barrier_head; + struct srcu_node *mynode; + unsigned long grpmask; + + int cpu; + struct srcu_struct *ssp; +}; + + + + +struct srcu_node { + spinlock_t lock; + unsigned long srcu_have_cbs[4]; + + + unsigned long srcu_data_have_cbs[4]; + + unsigned long srcu_gp_seq_needed_exp; + struct srcu_node *srcu_parent; + int grplo; + int grphi; +}; + + + + +struct srcu_struct { + struct srcu_node node[(1 + (((8192) + (((16) * 64)) - 1) / (((16) * 64))) + (((8192) + ((16)) - 1) / ((16))))]; + struct srcu_node *level[3 + 1]; + + struct mutex srcu_cb_mutex; + spinlock_t lock; + struct mutex srcu_gp_mutex; + unsigned int srcu_idx; + unsigned long srcu_gp_seq; + unsigned long srcu_gp_seq_needed; + unsigned long srcu_gp_seq_needed_exp; + unsigned long srcu_last_gp_end; + struct srcu_data *sda; + unsigned long srcu_barrier_seq; + struct mutex srcu_barrier_mutex; + struct completion srcu_barrier_completion; + + atomic_t srcu_barrier_cpu_cnt; + + + struct delayed_work work; + + struct lockdep_map dep_map; + +}; +# 137 "./include/linux/srcutree.h" +void synchronize_srcu_expedited(struct srcu_struct *ssp); +void srcu_barrier(struct srcu_struct *ssp); +void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf); +# 50 "./include/linux/srcu.h" 2 + + + + + + + +void call_srcu(struct srcu_struct *ssp, struct callback_head *head, + void (*func)(struct callback_head *head)); +void cleanup_srcu_struct(struct srcu_struct *ssp); +int __srcu_read_lock(struct srcu_struct *ssp) ; +void __srcu_read_unlock(struct srcu_struct *ssp, int idx) ; +void synchronize_srcu(struct srcu_struct *ssp); +# 82 "./include/linux/srcu.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int srcu_read_lock_held(const struct srcu_struct *ssp) +{ + if (!debug_lockdep_rcu_enabled()) + return 1; + return lock_is_held(&ssp->dep_map); +} +# 150 "./include/linux/srcu.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int srcu_read_lock(struct srcu_struct *ssp) +{ + int retval; + + retval = __srcu_read_lock(ssp); + rcu_lock_acquire(&(ssp)->dep_map); + return retval; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int +srcu_read_lock_notrace(struct srcu_struct *ssp) +{ + int retval; + + retval = __srcu_read_lock(ssp); + return retval; +} +# 176 "./include/linux/srcu.h" +static inline __attribute__((__gnu_inline__)) 
+/*
+ * … preprocessor-expanded Linux kernel headers, elided for brevity. This
+ * region of the added file consists only of mechanically expanded cpp
+ * output with no hand-written content:
+ *   - include/linux/srcu.h: srcu_read_unlock(), srcu_read_unlock_notrace(),
+ *     smp_mb__after_srcu_read_unlock()
+ *   - include/linux/notifier.h: struct notifier_block, the
+ *     {atomic,blocking,raw,srcu}_notifier_head structs, and the
+ *     *_notifier_chain_{register,unregister} / *_notifier_call_chain()
+ *     declarations, plus notifier_from_errno()/notifier_to_errno()
+ *   - arch/x86/include/asm/uprobes.h and include/linux/uprobes.h:
+ *     struct arch_uprobe, struct uprobe_task, struct return_instance,
+ *     and the extern uprobe_*/arch_uprobe_* API declarations
+ *   - arch/x86/include/asm/mmu.h and include/linux/mm_types.h:
+ *     mm_context_t, struct page, struct vm_area_struct, struct mm_struct,
+ *     the mm_cpumask()/tlb_flush_pending helpers, vm_fault_t and
+ *     enum vm_fault_reason, struct vm_special_mapping, swp_entry_t
+ *   - include/linux/page-flags.h: enum pageflags, compound_head()/
+ *     PageTail()/PageCompound()/PagePoisoned(), and the expanded
+ *     Page*/SetPage*/ClearPage*/TestClearPage* bit accessors
+ *     (PG_locked, PG_waiters, PG_error, PG_referenced, PG_dirty,
+ *     PG_lru, PG_active, PG_workingset, PG_slab, …)
+ */
__bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (333), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (423)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (424)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (333), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (425)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (426)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (333), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (427)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (428)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (333), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (429)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); 
})->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (430)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (333), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (431)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (432)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (333), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (433)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageSlobFree(struct page *page) { return test_bit(PG_slob_free, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (434)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (334), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (435)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (436)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " 
"1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (334), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (437)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (438)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (334), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (439)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (440)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (334), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (441)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (442)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (334), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection 
.discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (443)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (444)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (334), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (445)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageChecked(struct page *page) { return test_bit(PG_checked, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (446)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (335), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (447)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (448)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (335), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (449)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - 
.\n\t" ".popsection\n\t" : : "i" (450)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (335), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (451)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (452)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (335), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (453)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (454)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (335), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (455)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (456)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (335), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (457)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); 
})->flags); } + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PagePinned(struct page *page) { return test_bit(PG_pinned, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (458)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (338), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (459)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (460)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (338), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (461)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (462)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (338), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (463)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (464)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" 
"\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (338), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (465)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (466)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (338), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (467)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (468)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (338), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (469)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (470)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (339), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (471)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } 
while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (472)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (339), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (473)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (474)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (339), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (475)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (476)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (339), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (477)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageSavePinned(struct page *page) { return test_bit(PG_savepinned, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (478)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" 
".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (340), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (479)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (480)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (340), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (481)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (482)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (340), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (483)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (484)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (340), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (485)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageSavePinned(struct 
page *page) { clear_bit(PG_savepinned, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (486)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (340), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (487)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (488)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (340), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (489)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }; +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageForeign(struct page *page) { return test_bit(PG_foreign, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (490)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (341), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (491)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (492)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (341), "i" (0), "i" (sizeof(struct 
bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (493)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageForeign(struct page *page) { set_bit(PG_foreign, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (494)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (341), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (495)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (496)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (341), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (497)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageForeign(struct page *page) { clear_bit(PG_foreign, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (498)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (341), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (499)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" 
".long %c0b - .\n\t" ".popsection\n\t" : : "i" (500)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (341), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (501)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); }; +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageXenRemapped(struct page *page) { return test_bit(PG_xen_remapped, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (502)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (342), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (503)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (504)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (342), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (505)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageXenRemapped(struct page *page) { set_bit(PG_xen_remapped, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (506)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (342), "i" 
(0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (507)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (508)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (342), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (509)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageXenRemapped(struct page *page) { clear_bit(PG_xen_remapped, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (510)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (342), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (511)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (512)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (342), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (513)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageXenRemapped(struct page *page) { return test_and_clear_bit(PG_xen_remapped, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ 
asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (514)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (343), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (515)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (516)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (343), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (517)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageReserved(struct page *page) { return test_bit(PG_reserved, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (518)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (345), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (519)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (520)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (345), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (521)); }); asm 
volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (522)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (345), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (523)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (524)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (345), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (525)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (526)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (345), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (527)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (528)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " 
"1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (345), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (529)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (530)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (346), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (531)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (532)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (346), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (533)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageReserved(struct page *page) { __set_bit(PG_reserved, &({ do { if (__builtin_expect(!!(1 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (534)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (347), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : 
"i" (535)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (536)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (347), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (537)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageSwapBacked(struct page *page) { return test_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (538)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (348), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (539)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (540)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (348), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (541)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (542)); }); do { asm volatile("1:\t" ".byte 
0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (348), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (543)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (544)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (348), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (545)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (546)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (348), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (547)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (548)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (348), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (549)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } 
while (0); compound_head(page); }); })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (550)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (349), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (551)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (552)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (349), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (553)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageSwapBacked(struct page *page) { __set_bit(PG_swapbacked, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (554)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (350), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (555)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (556)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" 
".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (350), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (557)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PagePrivate(struct page *page) { return test_bit(PG_private, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (558)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (357), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (559)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (560)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (357), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (561)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (562)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" 
"\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (357), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (563)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (564)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (357), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (565)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (566)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (358), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (567)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PagePrivate2(struct page *page) { return test_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (568)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (359), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection 
.discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (569)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPagePrivate2(struct page *page) { set_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (570)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (359), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (571)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPagePrivate2(struct page *page) { clear_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (572)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (359), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (573)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestSetPagePrivate2(struct page *page) { return test_and_set_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (574)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (359), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (575)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPagePrivate2(struct page *page) { return test_and_clear_bit(PG_private_2, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (576)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (359), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (577)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageOwnerPriv1(struct page *page) { return test_bit(PG_owner_priv_1, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (578)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (360), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (579)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageOwnerPriv1(struct page *page) { set_bit(PG_owner_priv_1, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (580)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (360), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (581)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageOwnerPriv1(struct page *page) { 
clear_bit(PG_owner_priv_1, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (582)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (360), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (583)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageOwnerPriv1(struct page *page) { return test_and_clear_bit(PG_owner_priv_1, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (584)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (361), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (585)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageWriteback(struct page *page) { return test_bit(PG_writeback, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (586)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (367), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (587)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (588)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" 
".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (367), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (589)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (590)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (368), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (591)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (592)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (368), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (593)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (594)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (368), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm 
volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (595)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (596)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (368), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (597)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageMappedToDisk(struct page *page) { return test_bit(PG_mappedtodisk, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (598)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (369), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (599)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (600)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (369), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (601)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && 
PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (602)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (369), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (603)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (604)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (369), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (605)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (606)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (369), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (607)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (608)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (369), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm 
volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (609)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageReclaim(struct page *page) { return test_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(0 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (610)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (372), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (611)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (612)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (372), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (613)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (614)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (372), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (615)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ 
asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (616)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (372), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (617)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (618)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (372), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (619)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (620)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (372), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (621)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } + static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(1 && PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "1 && PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (622)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" 
"\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (373), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (623)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(compound_head(page))), 0)) { dump_page(compound_head(page), "VM_BUG_ON_PAGE(" "PagePoisoned(compound_head(page))"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (624)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (373), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (625)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); compound_head(page); }); })->flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageReadahead(struct page *page) { return test_bit(PG_reclaim, &({ do { if (__builtin_expect(!!(0 && PageCompound(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "0 && PageCompound(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (626)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (374), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (627)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); ({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (628)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (374), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (629)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; }); })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void SetPageReadahead(struct page *page) { 
[elided: a long run of preprocessed C from include/linux/page-flags.h (kernel source lines ~374-701), with the diff's `+` markers fused mid-line by extraction. This is evidently a preprocessed kernel translation unit shipped with the change (most plausibly as analysis/test input for `eba`), not hand-written code. The span is the verbatim macro expansion of the kernel's page-flag accessor generators: test/set/clear/test-and-clear inline helpers for PG_reclaim (Readahead), PG_swapcache, PG_unevictable, PG_mlocked, PG_uncached, PG_hwpoison, PG_young, PG_idle, PG_reported and PG_uptodate; the mapping-bit predicates PageMappingFlags, PageAnon, __PageMovable and PageKsm; the writeback helpers set_page_writeback/set_page_writeback_keepwrite; and the compound-page machinery (PageHead, set_compound_head/clear_compound_head, ClearPageCompound, PageTransHuge, PageTransCompound(Map), PageTransTail, PageDoubleMap). Every accessor inlines VM_BUG_ON_PAGE, whose asm-volatile __bug_table bookkeeping accounts for almost all of the text.]
asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (755)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + clear_bit(PG_double_map, &page[1].flags); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int TestSetPageDoubleMap(struct page *page) +{ + do { if (__builtin_expect(!!(!PageHead(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageHead(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (756)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (706), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (757)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + return test_and_set_bit(PG_double_map, &page[1].flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int TestClearPageDoubleMap(struct page *page) +{ + do { if (__builtin_expect(!!(!PageHead(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageHead(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (758)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (712), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (759)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + return test_and_clear_bit(PG_double_map, &page[1].flags); +} +# 747 "./include/linux/page-flags.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_has_type(struct page *page) +{ + return (int)page->page_type < -128; +} +# 772 "./include/linux/page-flags.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageBuddy(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000080)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageBuddy(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (760)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection 
__bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (772), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (761)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000080; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageBuddy(struct page *page) { do { if (__builtin_expect(!!(!PageBuddy(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageBuddy(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (762)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (772), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (763)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000080; } +# 791 "./include/linux/page-flags.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageOffline(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000100)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageOffline(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (764)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (791), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (765)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000100; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageOffline(struct page *page) { do { if (__builtin_expect(!!(!PageOffline(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageOffline(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" 
".popsection\n\t" : : "i" (766)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (791), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (767)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000100; } + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageKmemcg(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000200)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageKmemcg(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (768)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (797), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (769)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000200; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageKmemcg(struct page *page) { do { if (__builtin_expect(!!(!PageKmemcg(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageKmemcg(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (770)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (797), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (771)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000200; } + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageTable(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000400)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageTable(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (772)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (802), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (773)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000400; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageTable(struct page *page) { do { if (__builtin_expect(!!(!PageTable(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageTable(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (774)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (802), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (775)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000400; } + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageGuard(struct page *page) { return ((page->page_type & (0xf0000000 | 0x00000800)) == 0xf0000000); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageGuard(struct page *page) { do { if (__builtin_expect(!!(!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!((page->page_type & (0xf0000000 | 0)) == 0xf0000000)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (776)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (807), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (777)); }); asm volatile(""); 
__builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type &= ~0x00000800; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageGuard(struct page *page) { do { if (__builtin_expect(!!(!PageGuard(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageGuard(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (778)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (807), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (779)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page->page_type |= 0x00000800; } + +extern bool is_free_buddy_page(struct page *page); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) int PageIsolated(struct page *page) { return test_bit(PG_isolated, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (780)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (811), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (781)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __SetPageIsolated(struct page *page) { __set_bit(PG_isolated, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (782)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (811), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (783)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __ClearPageIsolated(struct page *page) { __clear_bit(PG_isolated, &({ do { if (__builtin_expect(!!(PagePoisoned(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PagePoisoned(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (784)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (811), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (785)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); page; })->flags); }; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int PageSlabPfmemalloc(struct page *page) +{ + do { if (__builtin_expect(!!(!PageSlab(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageSlab(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (786)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (819), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (787)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + return PageActive(page); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void SetPageSlabPfmemalloc(struct page *page) +{ + do { if (__builtin_expect(!!(!PageSlab(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageSlab(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (788)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (825), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (789)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + SetPageActive(page); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __ClearPageSlabPfmemalloc(struct page *page) +{ + do { if (__builtin_expect(!!(!PageSlab(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageSlab(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" 
".popsection\n\t" : : "i" (790)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (831), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (791)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + __ClearPageActive(page); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ClearPageSlabPfmemalloc(struct page *page) +{ + do { if (__builtin_expect(!!(!PageSlab(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageSlab(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (792)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (837), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (793)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + ClearPageActive(page); +} +# 878 "./include/linux/page-flags.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_has_private(struct page *page) +{ + return !!(page->flags & (1UL << PG_private | 1UL << PG_private_2)); +} +# 23 "./include/linux/mmzone.h" 2 +# 41 "./include/linux/mmzone.h" +enum migratetype { + MIGRATE_UNMOVABLE, + MIGRATE_MOVABLE, + MIGRATE_RECLAIMABLE, + MIGRATE_PCPTYPES, + MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, +# 61 "./include/linux/mmzone.h" + MIGRATE_CMA, + + + MIGRATE_ISOLATE, + + MIGRATE_TYPES +}; + + +extern const char * const migratetype_names[MIGRATE_TYPES]; +# 80 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_migrate_movable(int mt) +{ + return __builtin_expect(!!((mt) == MIGRATE_CMA), 0) || mt == MIGRATE_MOVABLE; +} + + + + + +extern int page_group_by_mobility_disabled; +# 98 "./include/linux/mmzone.h" +struct free_area { + struct list_head free_list[MIGRATE_TYPES]; + unsigned long nr_free; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *get_page_from_free_area(struct free_area *area, + int migratetype) +{ + return ({ struct list_head *head__ = (&area->free_list[migratetype]); struct list_head *pos__ = ({ do { extern void __compiletime_assert_794(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(head__->next) == sizeof(char) || sizeof(head__->next) == sizeof(short) || sizeof(head__->next) == sizeof(int) || sizeof(head__->next) == sizeof(long)) || sizeof(head__->next) == sizeof(long long))) __compiletime_assert_794(); } while (0); ({ typeof( 
_Generic((head__->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head__->next))) __x = (*(const volatile typeof( _Generic((head__->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head__->next))) *)&(head__->next)); do { } while (0); (typeof(head__->next))__x; }); }); pos__ != head__ ? ({ void *__mptr = (void *)(pos__); do { extern void __compiletime_assert_795(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(pos__)), typeof(((struct page *)0)->lru)) && !__builtin_types_compatible_p(typeof(*(pos__)), typeof(void))))) __compiletime_assert_795(); } while (0); ((struct page *)(__mptr - __builtin_offsetof(struct page, lru))); }) : ((void *)0); }) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool free_area_empty(struct free_area *area, int migratetype) +{ + return list_empty(&area->free_list[migratetype]); +} + +struct pglist_data; +# 124 "./include/linux/mmzone.h" +struct zone_padding { + char x[0]; +} __attribute__((__aligned__(1 << (12)))); + + + + + + +enum numa_stat_item { + NUMA_HIT, + NUMA_MISS, + NUMA_FOREIGN, + NUMA_INTERLEAVE_HIT, + NUMA_LOCAL, + NUMA_OTHER, + NR_VM_NUMA_STAT_ITEMS +}; + + + + +enum zone_stat_item { + + NR_FREE_PAGES, + NR_ZONE_LRU_BASE, + NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, + NR_ZONE_ACTIVE_ANON, + NR_ZONE_INACTIVE_FILE, + NR_ZONE_ACTIVE_FILE, + NR_ZONE_UNEVICTABLE, + NR_ZONE_WRITE_PENDING, + NR_MLOCK, + NR_PAGETABLE, + NR_KERNEL_STACK_KB, + + + + + NR_BOUNCE, + + NR_ZSPAGES, + + NR_FREE_CMA_PAGES, + NR_VM_ZONE_STAT_ITEMS }; + +enum node_stat_item { + NR_LRU_BASE, + NR_INACTIVE_ANON = NR_LRU_BASE, + NR_ACTIVE_ANON, + NR_INACTIVE_FILE, + NR_ACTIVE_FILE, + NR_UNEVICTABLE, + NR_SLAB_RECLAIMABLE, + NR_SLAB_UNRECLAIMABLE, + NR_ISOLATED_ANON, + NR_ISOLATED_FILE, + WORKINGSET_NODES, + WORKINGSET_REFAULT, + WORKINGSET_ACTIVATE, + WORKINGSET_RESTORE, + WORKINGSET_NODERECLAIM, + NR_ANON_MAPPED, + NR_FILE_MAPPED, + + NR_FILE_PAGES, + NR_FILE_DIRTY, + NR_WRITEBACK, + NR_WRITEBACK_TEMP, + NR_SHMEM, + NR_SHMEM_THPS, + NR_SHMEM_PMDMAPPED, + NR_FILE_THPS, + NR_FILE_PMDMAPPED, + NR_ANON_THPS, + NR_VMSCAN_WRITE, + NR_VMSCAN_IMMEDIATE, + NR_DIRTIED, + NR_WRITTEN, + NR_KERNEL_MISC_RECLAIMABLE, + NR_FOLL_PIN_ACQUIRED, + NR_FOLL_PIN_RELEASED, + NR_VM_NODE_STAT_ITEMS +}; +# 222 "./include/linux/mmzone.h" +enum lru_list { + LRU_INACTIVE_ANON = 0, + LRU_ACTIVE_ANON = 0 + 1, + LRU_INACTIVE_FILE = 0 + 2, + LRU_ACTIVE_FILE = 0 + 2 + 1, + LRU_UNEVICTABLE, + NR_LRU_LISTS +}; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_file_lru(enum lru_list lru) +{ + return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool 
is_active_lru(enum lru_list lru) +{ + return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); +} + +enum lruvec_flags { + LRUVEC_CONGESTED, + + +}; + +struct lruvec { + struct list_head lists[NR_LRU_LISTS]; + + + + + + unsigned long anon_cost; + unsigned long file_cost; + + atomic_long_t inactive_age; + + unsigned long refaults; + + unsigned long flags; + + struct pglist_data *pgdat; + +}; +# 279 "./include/linux/mmzone.h" +typedef unsigned isolate_mode_t; + +enum zone_watermarks { + WMARK_MIN, + WMARK_LOW, + WMARK_HIGH, + NR_WMARK +}; + + + + + + +struct per_cpu_pages { + int count; + int high; + int batch; + + + struct list_head lists[MIGRATE_PCPTYPES]; +}; + +struct per_cpu_pageset { + struct per_cpu_pages pcp; + + s8 expire; + u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS]; + + + s8 stat_threshold; + s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; + +}; + +struct per_cpu_nodestat { + s8 stat_threshold; + s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; +}; + + + +enum zone_type { +# 353 "./include/linux/mmzone.h" + ZONE_DMA, + + + ZONE_DMA32, + + + + + + + ZONE_NORMAL, +# 375 "./include/linux/mmzone.h" + ZONE_MOVABLE, + + ZONE_DEVICE, + + __MAX_NR_ZONES + +}; + + + +struct zone { + + + + unsigned long _watermark[NR_WMARK]; + unsigned long watermark_boost; + + unsigned long nr_reserved_highatomic; +# 403 "./include/linux/mmzone.h" + long lowmem_reserve[5]; + + + int node; + + struct pglist_data *zone_pgdat; + struct per_cpu_pageset *pageset; +# 420 "./include/linux/mmzone.h" + unsigned long zone_start_pfn; +# 457 "./include/linux/mmzone.h" + atomic_long_t managed_pages; + unsigned long spanned_pages; + unsigned long present_pages; + + const char *name; + + + + + + + + unsigned long nr_isolate_pageblock; + + + + + seqlock_t span_seqlock; + + + int initialized; + + + struct zone_padding _pad1_; + + + struct free_area free_area[11]; + + + unsigned long flags; + + + spinlock_t lock; + + + struct zone_padding _pad2_; + + + + + + + unsigned long percpu_drift_mark; + + + + unsigned long compact_cached_free_pfn; + + unsigned long compact_cached_migrate_pfn[2]; + unsigned long compact_init_migrate_pfn; + unsigned long compact_init_free_pfn; +# 516 "./include/linux/mmzone.h" + unsigned int compact_considered; + unsigned int compact_defer_shift; + int compact_order_failed; + + + + + bool compact_blockskip_flush; + + + bool contiguous; + + struct zone_padding _pad3_; + + atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; +} __attribute__((__aligned__(1 << (12)))); + +enum pgdat_flags { + PGDAT_DIRTY, + + + + PGDAT_WRITEBACK, + + + PGDAT_RECLAIM_LOCKED, +}; + +enum zone_flags { + ZONE_BOOSTED_WATERMARK, + + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long zone_managed_pages(struct zone *zone) +{ + return (unsigned long)atomic_long_read(&zone->managed_pages); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long zone_end_pfn(const struct zone *zone) +{ + return zone->zone_start_pfn + zone->spanned_pages; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) +{ + return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zone_is_initialized(struct 
zone *zone) +{ + return zone->initialized; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zone_is_empty(struct zone *zone) +{ + return zone->spanned_pages == 0; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool zone_intersects(struct zone *zone, + unsigned long start_pfn, unsigned long nr_pages) +{ + if (zone_is_empty(zone)) + return false; + if (start_pfn >= zone_end_pfn(zone) || + start_pfn + nr_pages <= zone->zone_start_pfn) + return false; + + return true; +} +# 602 "./include/linux/mmzone.h" +enum { + ZONELIST_FALLBACK, + + + + + + ZONELIST_NOFALLBACK, + + MAX_ZONELISTS +}; + + + + + +struct zoneref { + struct zone *zone; + int zone_idx; +}; +# 637 "./include/linux/mmzone.h" +struct zonelist { + struct zoneref _zonerefs[((1 << 10) * 5) + 1]; +}; + + + +extern struct page *mem_map; + + + +struct deferred_split { + spinlock_t split_queue_lock; + struct list_head split_queue; + unsigned long split_queue_len; +}; +# 662 "./include/linux/mmzone.h" +typedef struct pglist_data { + + + + + + struct zone node_zones[5]; + + + + + + + struct zonelist node_zonelists[MAX_ZONELISTS]; + + int nr_zones; +# 697 "./include/linux/mmzone.h" + spinlock_t node_size_lock; + + unsigned long node_start_pfn; + unsigned long node_present_pages; + unsigned long node_spanned_pages; + + int node_id; + wait_queue_head_t kswapd_wait; + wait_queue_head_t pfmemalloc_wait; + struct task_struct *kswapd; + + int kswapd_order; + enum zone_type kswapd_highest_zoneidx; + + int kswapd_failures; + + + int kcompactd_max_order; + enum zone_type kcompactd_highest_zoneidx; + wait_queue_head_t kcompactd_wait; + struct task_struct *kcompactd; + + + + + + unsigned long totalreserve_pages; + + + + + + unsigned long min_unmapped_pages; + unsigned long min_slab_pages; + + + + struct zone_padding _pad1_; + spinlock_t lru_lock; + + + + + + + unsigned long first_deferred_pfn; + + + + struct deferred_split deferred_split_queue; +# 756 "./include/linux/mmzone.h" + struct lruvec __lruvec; + + unsigned long flags; + + struct zone_padding _pad2_; + + + struct per_cpu_nodestat *per_cpu_nodestats; + atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; +} pg_data_t; +# 779 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pgdat_end_pfn(pg_data_t *pgdat) +{ + return pgdat->node_start_pfn + pgdat->node_spanned_pages; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pgdat_is_empty(pg_data_t *pgdat) +{ + return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; +} + +# 1 "./include/linux/memory_hotplug.h" 1 + + + + +# 1 "./include/linux/mmzone.h" 1 +# 6 "./include/linux/memory_hotplug.h" 2 + + + + +struct page; +struct zone; +struct pglist_data; +struct mem_section; +struct memory_block; +struct resource; +struct vmem_altmap; +# 40 "./include/linux/memory_hotplug.h" +enum { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, + SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, + MIX_SECTION_INFO, + NODE_INFO, + MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO, +}; + + +enum { + + MMOP_OFFLINE = 0, + + MMOP_ONLINE, + + MMOP_ONLINE_KERNEL, + + MMOP_ONLINE_MOVABLE, +}; + + + + + + + +struct mhp_params { + struct vmem_altmap *altmap; + pgprot_t pgprot; +}; +# 78 "./include/linux/memory_hotplug.h" +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned zone_span_seqbegin(struct zone *zone) +{ + return read_seqbegin(&zone->span_seqlock); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int zone_span_seqretry(struct zone *zone, unsigned iv) +{ + return read_seqretry(&zone->span_seqlock, iv); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_span_writelock(struct zone *zone) +{ + write_seqlock(&zone->span_seqlock); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_span_writeunlock(struct zone *zone) +{ + write_sequnlock(&zone->span_seqlock); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_seqlock_init(struct zone *zone) +{ + do { do { static struct lock_class_key __key; __seqcount_init((&(&zone->span_seqlock)->seqcount), "&(&zone->span_seqlock)->seqcount", &__key); } while (0); do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&(&zone->span_seqlock)->lock), "&(&zone->span_seqlock)->lock", &__key, LD_WAIT_CONFIG); } while (0); } while (0); +} +extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); +extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); +extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); + +extern int online_pages(unsigned long pfn, unsigned long nr_pages, + int online_type, int nid); +extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, + unsigned long end_pfn); +extern unsigned long __offline_isolated_pages(unsigned long start_pfn, + unsigned long end_pfn); + +typedef void (*online_page_callback_t)(struct page *page, unsigned int order); + +extern void generic_online_page(struct page *page, unsigned int order); +extern int set_online_page_callback(online_page_callback_t callback); +extern int restore_online_page_callback(online_page_callback_t callback); + +extern int try_online_node(int nid); + +extern int arch_add_memory(int nid, u64 start, u64 size, + struct mhp_params *params); +extern u64 max_mem_size; + +extern int memhp_online_type_from_str(const char *str); + + +extern int memhp_default_online_type; + +extern bool movable_node_enabled; +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool movable_node_is_enabled(void) +{ + return movable_node_enabled; +} + +extern void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap); +extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, + struct vmem_altmap *altmap); + + +extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, + struct mhp_params *params); +# 148 "./include/linux/memory_hotplug.h" +int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, + struct mhp_params *params); + + + +extern int memory_add_physaddr_to_nid(u64 start); +# 198 "./include/linux/memory_hotplug.h" +extern pg_data_t *node_data[]; +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ + node_data[nid] = pgdat; +} +# 222 "./include/linux/memory_hotplug.h" +extern void __attribute__((__section__(".init.text"))) 
__attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) register_page_bootmem_info_node(struct pglist_data *pgdat); + + + + + +extern void put_page_bootmem(struct page *page); +extern void get_page_bootmem(unsigned long ingo, struct page *page, + unsigned long type); + +void get_online_mems(void); +void put_online_mems(void); + +void mem_hotplug_begin(void); +void mem_hotplug_done(void); +# 291 "./include/linux/memory_hotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) +{ + do { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); *flags = _raw_spin_lock_irqsave(spinlock_check(&pgdat->node_size_lock)); } while (0); } while (0); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void pgdat_resize_init(struct pglist_data *pgdat) +{ + do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&pgdat->node_size_lock), "&pgdat->node_size_lock", &__key, LD_WAIT_CONFIG); } while (0); +} +# 317 "./include/linux/memory_hotplug.h" +extern void try_offline_node(int nid); +extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); +extern int remove_memory(int nid, u64 start, u64 size); +extern void __remove_memory(int nid, u64 start, u64 size); +extern int offline_and_remove_memory(int nid, u64 start, u64 size); +# 339 "./include/linux/memory_hotplug.h" +extern void set_zone_contiguous(struct zone *zone); +extern void clear_zone_contiguous(struct zone *zone); + +extern void __attribute__((__section__(".ref.text"))) __attribute__((__noinline__)) free_area_init_core_hotplug(int nid); +extern int __add_memory(int nid, u64 start, u64 size); +extern int add_memory(int nid, u64 start, u64 size); +extern int add_memory_resource(int nid, struct resource *resource); +extern int add_memory_driver_managed(int nid, u64 start, u64 size, + const char *resource_name); +extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages, struct vmem_altmap *altmap); +extern void remove_pfn_range_from_zone(struct zone *zone, + unsigned long start_pfn, + unsigned long nr_pages); +extern bool is_memblock_offlined(struct memory_block *mem); +extern int sparse_add_section(int nid, unsigned long pfn, + unsigned long nr_pages, struct vmem_altmap *altmap); +extern void sparse_remove_section(struct mem_section *ms, + unsigned long pfn, unsigned long nr_pages, + unsigned long map_offset, struct vmem_altmap *altmap); +extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, + unsigned long pnum); +extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, + int online_type); +extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, + unsigned long nr_pages); +# 790 "./include/linux/mmzone.h" 2 + +void build_all_zonelists(pg_data_t *pgdat); +void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, + enum zone_type highest_zoneidx); +bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, + int highest_zoneidx, unsigned int alloc_flags, + long 
free_pages); +bool zone_watermark_ok(struct zone *z, unsigned int order, + unsigned long mark, int highest_zoneidx, + unsigned int alloc_flags); +bool zone_watermark_ok_safe(struct zone *z, unsigned int order, + unsigned long mark, int highest_zoneidx); +enum memmap_context { + MEMMAP_EARLY, + MEMMAP_HOTPLUG, +}; +extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, + unsigned long size); + +extern void lruvec_init(struct lruvec *lruvec); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) +{ + + return lruvec->pgdat; + + + +} + +extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); + + +void memory_present(int nid, unsigned long start, unsigned long end); + + + + + +void memblocks_present(void); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int local_memory_node(int node_id) { return node_id; }; +# 851 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool managed_zone(struct zone *zone) +{ + return zone_managed_pages(zone); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool populated_zone(struct zone *zone) +{ + return zone->present_pages; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int zone_to_nid(struct zone *zone) +{ + return zone->node; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_set_nid(struct zone *zone, int nid) +{ + zone->node = nid; +} +# 881 "./include/linux/mmzone.h" +extern int movable_zone; +# 894 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_highmem_idx(enum zone_type idx) +{ + + + + + return 0; + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_highmem(struct zone *zone) +{ + + + + return 0; + +} + + +struct ctl_table; + +int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, + size_t *, loff_t *); +extern int sysctl_lowmem_reserve_ratio[5]; +int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, + size_t *, loff_t *); +int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, + void *, size_t *, loff_t *); +int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, + void *, size_t *, loff_t *); +int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, + void *, size_t *, loff_t *); +int numa_zonelist_order_handler(struct ctl_table *, int, + void *, size_t *, loff_t *); +extern int percpu_pagelist_fraction; +extern char numa_zonelist_order[]; +# 949 "./include/linux/mmzone.h" +# 1 "./arch/x86/include/asm/mmzone.h" 1 + + + + +# 1 "./arch/x86/include/asm/mmzone_64.h" 1 +# 11 "./arch/x86/include/asm/mmzone_64.h" +# 1 "./arch/x86/include/asm/smp.h" 1 +# 12 "./arch/x86/include/asm/smp.h" +# 1 "./arch/x86/include/asm/mpspec.h" 1 + + + + + +# 1 "./arch/x86/include/asm/mpspec_def.h" 1 +# 22 "./arch/x86/include/asm/mpspec_def.h" +struct 
mpf_intel { + char signature[4]; + unsigned int physptr; + unsigned char length; + unsigned char specification; + unsigned char checksum; + unsigned char feature1; + unsigned char feature2; + unsigned char feature3; + unsigned char feature4; + unsigned char feature5; +}; + + + +struct mpc_table { + char signature[4]; + unsigned short length; + char spec; + char checksum; + char oem[8]; + char productid[12]; + unsigned int oemptr; + unsigned short oemsize; + unsigned short oemcount; + unsigned int lapic; + unsigned int reserved; +}; +# 68 "./arch/x86/include/asm/mpspec_def.h" +struct mpc_cpu { + unsigned char type; + unsigned char apicid; + unsigned char apicver; + unsigned char cpuflag; + unsigned int cpufeature; + unsigned int featureflag; + unsigned int reserved[2]; +}; + +struct mpc_bus { + unsigned char type; + unsigned char busid; + unsigned char bustype[6]; +}; +# 106 "./arch/x86/include/asm/mpspec_def.h" +struct mpc_ioapic { + unsigned char type; + unsigned char apicid; + unsigned char apicver; + unsigned char flags; + unsigned int apicaddr; +}; + +struct mpc_intsrc { + unsigned char type; + unsigned char irqtype; + unsigned short irqflag; + unsigned char srcbus; + unsigned char srcbusirq; + unsigned char dstapic; + unsigned char dstirq; +}; + +enum mp_irq_source_types { + mp_INT = 0, + mp_NMI = 1, + mp_SMI = 2, + mp_ExtINT = 3 +}; +# 145 "./arch/x86/include/asm/mpspec_def.h" +struct mpc_lintsrc { + unsigned char type; + unsigned char irqtype; + unsigned short irqflag; + unsigned char srcbusid; + unsigned char srcbusirq; + unsigned char destapic; + unsigned char destapiclint; +}; + + + +struct mpc_oemtable { + char signature[4]; + unsigned short length; + char rev; + char checksum; + char mpc[8]; +}; +# 177 "./arch/x86/include/asm/mpspec_def.h" +enum mp_bustype { + MP_BUS_ISA = 1, + MP_BUS_EISA, + MP_BUS_PCI, +}; +# 7 "./arch/x86/include/asm/mpspec.h" 2 +# 1 "./arch/x86/include/asm/x86_init.h" 1 + + + + + + +struct mpc_bus; +struct mpc_cpu; +struct mpc_table; +struct cpuinfo_x86; +# 23 "./arch/x86/include/asm/x86_init.h" +struct x86_init_mpparse { + void (*mpc_record)(unsigned int mode); + void (*setup_ioapic_ids)(void); + int (*mpc_apic_id)(struct mpc_cpu *m); + void (*smp_read_mpc_oem)(struct mpc_table *mpc); + void (*mpc_oem_pci_bus)(struct mpc_bus *m); + void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); + void (*find_smp_config)(void); + void (*get_smp_config)(unsigned int early); +}; +# 42 "./arch/x86/include/asm/x86_init.h" +struct x86_init_resources { + void (*probe_roms)(void); + void (*reserve_resources)(void); + char *(*memory_setup)(void); +}; +# 56 "./arch/x86/include/asm/x86_init.h" +struct x86_init_irqs { + void (*pre_vector_init)(void); + void (*intr_init)(void); + void (*intr_mode_select)(void); + void (*intr_mode_init)(void); +}; + + + + + + +struct x86_init_oem { + void (*arch_setup)(void); + void (*banner)(void); +}; +# 80 "./arch/x86/include/asm/x86_init.h" +struct x86_init_paging { + void (*pagetable_init)(void); +}; +# 91 "./arch/x86/include/asm/x86_init.h" +struct x86_init_timers { + void (*setup_percpu_clockev)(void); + void (*timer_init)(void); + void (*wallclock_init)(void); +}; + + + + + +struct x86_init_iommu { + int (*iommu_init)(void); +}; +# 112 "./arch/x86/include/asm/x86_init.h" +struct x86_init_pci { + int (*arch_init)(void); + int (*init)(void); + void (*init_irq)(void); + void (*fixup_irqs)(void); +}; +# 127 "./arch/x86/include/asm/x86_init.h" +struct x86_hyper_init { + void (*init_platform)(void); + void (*guest_late_init)(void); + bool 
(*x2apic_available)(void); + void (*init_mem_mapping)(void); + void (*init_after_bootmem)(void); +}; + + + + + + + +struct x86_init_acpi { + void (*set_root_pointer)(u64 addr); + u64 (*get_root_pointer)(void); + void (*reduced_hw_early_init)(void); +}; + + + + + +struct x86_init_ops { + struct x86_init_resources resources; + struct x86_init_mpparse mpparse; + struct x86_init_irqs irqs; + struct x86_init_oem oem; + struct x86_init_paging paging; + struct x86_init_timers timers; + struct x86_init_iommu iommu; + struct x86_init_pci pci; + struct x86_hyper_init hyper; + struct x86_init_acpi acpi; +}; + + + + + + +struct x86_cpuinit_ops { + void (*setup_percpu_clockev)(void); + void (*early_percpu_clock_init)(void); + void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); +}; + +struct timespec64; +# 196 "./arch/x86/include/asm/x86_init.h" +struct x86_legacy_devices { + int pnpbios; +}; +# 209 "./arch/x86/include/asm/x86_init.h" +enum x86_legacy_i8042_state { + X86_LEGACY_I8042_PLATFORM_ABSENT, + X86_LEGACY_I8042_FIRMWARE_ABSENT, + X86_LEGACY_I8042_EXPECTED_PRESENT, +}; +# 227 "./arch/x86/include/asm/x86_init.h" +struct x86_legacy_features { + enum x86_legacy_i8042_state i8042; + int rtc; + int warm_reset; + int no_vga; + int reserve_bios_regions; + struct x86_legacy_devices devices; +}; + + + + + + +struct x86_hyper_runtime { + void (*pin_vcpu)(int cpu); +}; +# 266 "./arch/x86/include/asm/x86_init.h" +struct x86_platform_ops { + unsigned long (*calibrate_cpu)(void); + unsigned long (*calibrate_tsc)(void); + void (*get_wallclock)(struct timespec64 *ts); + int (*set_wallclock)(const struct timespec64 *ts); + void (*iommu_shutdown)(void); + bool (*is_untracked_pat_range)(u64 start, u64 end); + void (*nmi_init)(void); + unsigned char (*get_nmi_reason)(void); + void (*save_sched_clock_state)(void); + void (*restore_sched_clock_state)(void); + void (*apic_post_init)(void); + struct x86_legacy_features legacy; + void (*set_legacy_features)(void); + struct x86_hyper_runtime hyper; +}; + +struct pci_dev; + +struct x86_msi_ops { + int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); + void (*teardown_msi_irq)(unsigned int irq); + void (*teardown_msi_irqs)(struct pci_dev *dev); + void (*restore_msi_irqs)(struct pci_dev *dev); +}; + +struct x86_apic_ops { + unsigned int (*io_apic_read) (unsigned int apic, unsigned int reg); + void (*restore)(void); +}; + +extern struct x86_init_ops x86_init; +extern struct x86_cpuinit_ops x86_cpuinit; +extern struct x86_platform_ops x86_platform; +extern struct x86_msi_ops x86_msi; +extern struct x86_apic_ops x86_apic_ops; + +extern void x86_early_init_platform_quirks(void); +extern void x86_init_noop(void); +extern void x86_init_uint_noop(unsigned int unused); +extern bool bool_x86_init_noop(void); +extern void x86_op_int_noop(int cpu); +extern bool x86_pnpbios_disabled(void); +# 8 "./arch/x86/include/asm/mpspec.h" 2 +# 1 "./arch/x86/include/asm/apicdef.h" 1 +# 179 "./arch/x86/include/asm/apicdef.h" +struct local_apic { + + struct { unsigned int __reserved[4]; } __reserved_01; + + struct { unsigned int __reserved[4]; } __reserved_02; + + struct { + unsigned int __reserved_1 : 24, + phys_apic_id : 4, + __reserved_2 : 4; + unsigned int __reserved[3]; + } id; + + const + struct { + unsigned int version : 8, + __reserved_1 : 8, + max_lvt : 8, + __reserved_2 : 8; + unsigned int __reserved[3]; + } version; + + struct { unsigned int __reserved[4]; } __reserved_03; + + struct { unsigned int __reserved[4]; } __reserved_04; + + struct { unsigned int __reserved[4]; } 
__reserved_05; + + struct { unsigned int __reserved[4]; } __reserved_06; + + struct { + unsigned int priority : 8, + __reserved_1 : 24; + unsigned int __reserved_2[3]; + } tpr; + + const + struct { + unsigned int priority : 8, + __reserved_1 : 24; + unsigned int __reserved_2[3]; + } apr; + + const + struct { + unsigned int priority : 8, + __reserved_1 : 24; + unsigned int __reserved_2[3]; + } ppr; + + struct { + unsigned int eoi; + unsigned int __reserved[3]; + } eoi; + + struct { unsigned int __reserved[4]; } __reserved_07; + + struct { + unsigned int __reserved_1 : 24, + logical_dest : 8; + unsigned int __reserved_2[3]; + } ldr; + + struct { + unsigned int __reserved_1 : 28, + model : 4; + unsigned int __reserved_2[3]; + } dfr; + + struct { + unsigned int spurious_vector : 8, + apic_enabled : 1, + focus_cpu : 1, + __reserved_2 : 22; + unsigned int __reserved_3[3]; + } svr; + + struct { + unsigned int bitfield; + unsigned int __reserved[3]; + } isr [8]; + + struct { + unsigned int bitfield; + unsigned int __reserved[3]; + } tmr [8]; + + struct { + unsigned int bitfield; + unsigned int __reserved[3]; + } irr [8]; + + union { + struct { + unsigned int send_cs_error : 1, + receive_cs_error : 1, + send_accept_error : 1, + receive_accept_error : 1, + __reserved_1 : 1, + send_illegal_vector : 1, + receive_illegal_vector : 1, + illegal_register_address : 1, + __reserved_2 : 24; + unsigned int __reserved_3[3]; + } error_bits; + struct { + unsigned int errors; + unsigned int __reserved_3[3]; + } all_errors; + } esr; + + struct { unsigned int __reserved[4]; } __reserved_08; + + struct { unsigned int __reserved[4]; } __reserved_09; + + struct { unsigned int __reserved[4]; } __reserved_10; + + struct { unsigned int __reserved[4]; } __reserved_11; + + struct { unsigned int __reserved[4]; } __reserved_12; + + struct { unsigned int __reserved[4]; } __reserved_13; + + struct { unsigned int __reserved[4]; } __reserved_14; + + struct { + unsigned int vector : 8, + delivery_mode : 3, + destination_mode : 1, + delivery_status : 1, + __reserved_1 : 1, + level : 1, + trigger : 1, + __reserved_2 : 2, + shorthand : 2, + __reserved_3 : 12; + unsigned int __reserved_4[3]; + } icr1; + + struct { + union { + unsigned int __reserved_1 : 24, + phys_dest : 4, + __reserved_2 : 4; + unsigned int __reserved_3 : 24, + logical_dest : 8; + } dest; + unsigned int __reserved_4[3]; + } icr2; + + struct { + unsigned int vector : 8, + __reserved_1 : 4, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + timer_mode : 1, + __reserved_3 : 14; + unsigned int __reserved_4[3]; + } lvt_timer; + + struct { + unsigned int vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + unsigned int __reserved_4[3]; + } lvt_thermal; + + struct { + unsigned int vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + __reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + unsigned int __reserved_4[3]; + } lvt_pc; + + struct { + unsigned int vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + unsigned int __reserved_3[3]; + } lvt_lint0; + + struct { + unsigned int vector : 8, + delivery_mode : 3, + __reserved_1 : 1, + delivery_status : 1, + polarity : 1, + remote_irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + unsigned int __reserved_3[3]; + } lvt_lint1; + + struct { + unsigned int vector : 8, + __reserved_1 : 4, + delivery_status : 1, + 
__reserved_2 : 3, + mask : 1, + __reserved_3 : 15; + unsigned int __reserved_4[3]; + } lvt_error; + + struct { + unsigned int initial_count; + unsigned int __reserved_2[3]; + } timer_icr; + + const + struct { + unsigned int curr_count; + unsigned int __reserved_2[3]; + } timer_ccr; + + struct { unsigned int __reserved[4]; } __reserved_16; + + struct { unsigned int __reserved[4]; } __reserved_17; + + struct { unsigned int __reserved[4]; } __reserved_18; + + struct { unsigned int __reserved[4]; } __reserved_19; + + struct { + unsigned int divisor : 4, + __reserved_1 : 28; + unsigned int __reserved_2[3]; + } timer_dcr; + + struct { unsigned int __reserved[4]; } __reserved_20; + +} __attribute__ ((packed)); +# 435 "./arch/x86/include/asm/apicdef.h" +enum ioapic_irq_destination_types { + dest_Fixed = 0, + dest_LowestPrio = 1, + dest_SMI = 2, + dest__reserved_1 = 3, + dest_NMI = 4, + dest_INIT = 5, + dest__reserved_2 = 6, + dest_ExtINT = 7 +}; +# 9 "./arch/x86/include/asm/mpspec.h" 2 + +extern int pic_mode; +# 37 "./arch/x86/include/asm/mpspec.h" +extern int mp_bus_id_to_type[256]; + + +extern unsigned long mp_bus_not_pci[(((256) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; + +extern unsigned int boot_cpu_physical_apicid; +extern u8 boot_cpu_apic_version; +extern unsigned long mp_lapic_addr; + + +extern int smp_found_config; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void get_smp_config(void) +{ + x86_init.mpparse.get_smp_config(0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void early_get_smp_config(void) +{ + x86_init.mpparse.get_smp_config(1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void find_smp_config(void) +{ + x86_init.mpparse.find_smp_config(); +} + + +extern void e820__memblock_alloc_reserved_mpc_new(void); +extern int enable_update_mptable; +extern int default_mpc_apic_id(struct mpc_cpu *m); +extern void default_smp_read_mpc_oem(struct mpc_table *mpc); + +extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str); + + + +extern void default_find_smp_config(void); +extern void default_get_smp_config(unsigned int early); +# 89 "./arch/x86/include/asm/mpspec.h" +int generic_processor_info(int apicid, int version); + + + +struct physid_mask { + unsigned long mask[(((32768) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; +}; + +typedef struct physid_mask physid_mask_t; +# 132 "./arch/x86/include/asm/mpspec.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long physids_coerce(physid_mask_t *map) +{ + return map->mask[0]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void physids_promote(unsigned long physids, physid_mask_t *map) +{ + bitmap_zero((*map).mask, 32768); + map->mask[0] = physids; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void physid_set_mask_of_physid(int physid, physid_mask_t *map) +{ + bitmap_zero((*map).mask, 32768); + set_bit(physid, (*map).mask); +} + + + + +extern physid_mask_t phys_cpu_present_map; +# 13 "./arch/x86/include/asm/smp.h" 2 +# 1 "./arch/x86/include/asm/apic.h" 1 +# 11 "./arch/x86/include/asm/apic.h" +# 1 "./arch/x86/include/asm/fixmap.h" 1 +# 29 "./arch/x86/include/asm/fixmap.h" 
+# 1 "./arch/x86/include/asm/acpi.h" 1 +# 9 "./arch/x86/include/asm/acpi.h" +# 1 "./include/acpi/pdc_intel.h" 1 +# 10 "./arch/x86/include/asm/acpi.h" 2 + +# 1 "./arch/x86/include/asm/numa.h" 1 + + + + + + +# 1 "./arch/x86/include/asm/topology.h" 1 +# 42 "./arch/x86/include/asm/topology.h" +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_x86_cpu_to_node_map; extern __attribute__((section(".data..percpu" ""))) __typeof__(int) x86_cpu_to_node_map; extern __typeof__(int) *x86_cpu_to_node_map_early_ptr; extern __typeof__(int) x86_cpu_to_node_map_early_map[]; + + + + + +extern int __cpu_to_node(int cpu); + + +extern int early_cpu_to_node(int cpu); +# 64 "./arch/x86/include/asm/topology.h" +extern cpumask_var_t node_to_cpumask_map[(1 << 10)]; + + +extern const struct cpumask *cpumask_of_node(int node); +# 76 "./arch/x86/include/asm/topology.h" +extern void setup_node_to_cpumask_map(void); + + + +extern int __node_distance(int, int); +# 103 "./arch/x86/include/asm/topology.h" +# 1 "./include/asm-generic/topology.h" 1 +# 104 "./arch/x86/include/asm/topology.h" 2 + +extern const struct cpumask *cpu_coregroup_mask(int cpu); +# 118 "./arch/x86/include/asm/topology.h" +extern unsigned int __max_logical_packages; + + +extern unsigned int __max_die_per_package; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int topology_max_die_per_package(void) +{ + return __max_die_per_package; +} + +extern int __max_smt_threads; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int topology_max_smt_threads(void) +{ + return __max_smt_threads; +} + +int topology_update_package_map(unsigned int apicid, unsigned int cpu); +int topology_update_die_map(unsigned int dieid, unsigned int cpu); +int topology_phys_to_logical_pkg(unsigned int pkg); +int topology_phys_to_logical_die(unsigned int die, unsigned int cpu); +bool topology_is_primary_thread(unsigned int cpu); +bool topology_smt_supported(void); +# 156 "./arch/x86/include/asm/topology.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_fix_phys_package_id(int num, u32 slot) +{ +} + +struct pci_bus; +int x86_pci_root_bus_node(int bus); +void x86_pci_root_bus_resources(int bus, struct list_head *resources); + +extern bool x86_topology_update; + + + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_sched_core_priority; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(int) sched_core_priority; +extern unsigned int __attribute__((__section__(".data..read_mostly"))) sysctl_sched_itmt_enabled; + + +void sched_set_itmt_core_prio(int prio, int core_cpu); + + +int sched_set_itmt_support(void); + + +void sched_clear_itmt_support(void); +# 199 "./arch/x86/include/asm/topology.h" +extern struct static_key_false arch_scale_freq_key; + + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_arch_freq_scale; extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) arch_freq_scale; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long arch_scale_freq_capacity(int cpu) +{ + return (*({ do { const void *__vpp_verify = (typeof((&(arch_freq_scale)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(arch_freq_scale)))) 
*)((&(arch_freq_scale))))); (typeof((typeof(*((&(arch_freq_scale)))) *)((&(arch_freq_scale))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); +} + + +extern void arch_scale_freq_tick(void); + + +extern void arch_set_max_freq_ratio(bool turbo_disabled); +# 8 "./arch/x86/include/asm/numa.h" 2 +# 21 "./arch/x86/include/asm/numa.h" +extern int numa_off; +# 31 "./arch/x86/include/asm/numa.h" +extern s16 __apicid_to_node[32768]; +extern nodemask_t numa_nodes_parsed __attribute__((__section__(".init.data"))); + +extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) numa_add_memblk(int nodeid, u64 start, u64 end); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) numa_set_distance(int from, int to, int distance); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_apicid_to_node(int apicid, s16 node) +{ + __apicid_to_node[apicid] = node; +} + +extern int numa_cpu_node(int cpu); +# 60 "./arch/x86/include/asm/numa.h" +extern void numa_set_node(int cpu, int node); +extern void numa_clear_node(int cpu); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) init_cpu_to_node(void); +extern void numa_add_cpu(int cpu); +extern void numa_remove_cpu(int cpu); +# 74 "./arch/x86/include/asm/numa.h" +void debug_cpumask_set_cpu(int cpu, int node, bool enable); + + + + + +void numa_emu_cmdline(char *); +# 12 "./arch/x86/include/asm/acpi.h" 2 +# 1 "./arch/x86/include/asm/fixmap.h" 1 +# 13 "./arch/x86/include/asm/acpi.h" 2 +# 23 "./arch/x86/include/asm/acpi.h" +extern int acpi_lapic; +extern int acpi_ioapic; +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_pci_disabled; +extern int acpi_skip_timer_override; +extern int acpi_use_timer_override; +extern int acpi_fix_pin2_polarity; +extern int acpi_disable_cmcff; + +extern u8 acpi_sci_flags; +extern u32 acpi_sci_override_gsi; +void acpi_pic_sci_set_trigger(unsigned int, u16); + +struct device; + +extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi, + int trigger, int polarity); +extern void (*__acpi_unregister_gsi)(u32 gsi); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void acpi_noirq_set(void) { acpi_noirq = 1; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} + + +extern int (*acpi_suspend_lowlevel)(void); + + +unsigned long acpi_get_wakeup_address(void); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int acpi_processor_cstate_check(unsigned int max_cstate) +{ + + + + + + + if (boot_cpu_data.x86 == 0x0F && + boot_cpu_data.x86_vendor == 2 && + boot_cpu_data.x86_model <= 0x05 && + boot_cpu_data.x86_stepping < 0x0A) + return 1; + else if ((__builtin_constant_p((19*32 + (4))) && ( ((((19*32 + (4)))>>5)==(0) && (1UL<<(((19*32 + (4)))&31) & ((1<<(( 0*32+ 0) & 
31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((19*32 + (4)))>>5)==(1) && (1UL<<(((19*32 + (4)))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((19*32 + (4)))>>5)==(2) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(3) && (1UL<<(((19*32 + (4)))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((19*32 + (4)))>>5)==(4) && (1UL<<(((19*32 + (4)))&31) & (0) )) || ((((19*32 + (4)))>>5)==(5) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(6) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(7) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(8) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(9) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(10) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(11) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(12) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(13) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(14) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(15) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(16) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(17) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((((19*32 + (4)))>>5)==(18) && (1UL<<(((19*32 + (4)))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((19*32 + (4)), (unsigned long *)((&boot_cpu_data)->x86_capability)))) + return 1; + else + return max_cstate; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_has_acpi_pdc(void) +{ + struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(cpu_info)))) *)((&(cpu_info))))); (typeof((typeof(*((&(cpu_info)))) *)((&(cpu_info))))) (__ptr + (((__per_cpu_offset[(0)])))); }); })); + return (c->x86_vendor == 0 || + c->x86_vendor == 5); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_acpi_set_pdc_bits(u32 *buf) +{ + struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(cpu_info)))) *)((&(cpu_info))))); (typeof((typeof(*((&(cpu_info)))) *)((&(cpu_info))))) (__ptr + (((__per_cpu_offset[(0)])))); }); })); + + buf[2] |= ((0x0010) | (0x0008) | (0x0002) | (0x0100) | (0x0200)); + + if ((__builtin_constant_p(( 4*32+ 7)) && ( (((( 4*32+ 7))>>5)==(0) && (1UL<<((( 4*32+ 7))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+ 7))>>5)==(1) && (1UL<<((( 4*32+ 7))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+ 7))>>5)==(2) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(3) && (1UL<<((( 4*32+ 7))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+ 7))>>5)==(4) && (1UL<<((( 4*32+ 7))&31) & (0) )) || (((( 4*32+ 7))>>5)==(5) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(6) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(7) && (1UL<<((( 4*32+ 
7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(8) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(9) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(10) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(11) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(12) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(13) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(14) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(15) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(16) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(17) && (1UL<<((( 4*32+ 7))&31) & 0 )) || (((( 4*32+ 7))>>5)==(18) && (1UL<<((( 4*32+ 7))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 4*32+ 7), (unsigned long *)((c)->x86_capability)))) + buf[2] |= ((0x0008) | (0x0002) | (0x0020) | (0x0800) | (0x0001)); + + if ((__builtin_constant_p(( 0*32+22)) && ( (((( 0*32+22))>>5)==(0) && (1UL<<((( 0*32+22))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 0*32+22))>>5)==(1) && (1UL<<((( 0*32+22))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 0*32+22))>>5)==(2) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(3) && (1UL<<((( 0*32+22))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 0*32+22))>>5)==(4) && (1UL<<((( 0*32+22))&31) & (0) )) || (((( 0*32+22))>>5)==(5) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(6) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(7) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(8) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(9) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(10) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(11) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(12) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(13) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(14) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(15) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(16) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(17) && (1UL<<((( 0*32+22))&31) & 0 )) || (((( 0*32+22))>>5)==(18) && (1UL<<((( 0*32+22))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit(( 0*32+22), (unsigned long *)((c)->x86_capability)))) + buf[2] |= (0x0004); + + + + + if (!(__builtin_constant_p(( 4*32+ 3)) && ( (((( 4*32+ 3))>>5)==(0) && (1UL<<((( 4*32+ 3))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+ 3))>>5)==(1) && (1UL<<((( 4*32+ 3))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+ 3))>>5)==(2) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(3) && (1UL<<((( 4*32+ 3))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+ 3))>>5)==(4) && (1UL<<((( 4*32+ 3))&31) & (0) )) || (((( 4*32+ 3))>>5)==(5) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(6) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(7) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(8) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(9) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(10) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(11) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(12) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(13) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(14) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(15) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(16) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(17) && (1UL<<((( 4*32+ 3))&31) & 0 )) || (((( 4*32+ 3))>>5)==(18) && (1UL<<((( 4*32+ 3))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 4*32+ 3), (unsigned long *)((c)->x86_capability)))) + buf[2] &= ~((0x0200)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool acpi_has_cpu_in_madt(void) +{ + return !!acpi_lapic; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void acpi_arch_set_root_pointer(u64 addr) +{ + x86_init.acpi.set_root_pointer(addr); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 acpi_arch_get_root_pointer(void) +{ + return x86_init.acpi.get_root_pointer(); +} + +void acpi_generic_reduced_hw_init(void); + +void x86_default_set_root_pointer(u64 addr); +u64 x86_default_get_root_pointer(void); +# 159 "./arch/x86/include/asm/acpi.h" +extern int x86_acpi_numa_init(void); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr) +{ +# 180 "./arch/x86/include/asm/acpi.h" + return ((pgprot_t) { ((((((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 1)| 0|(((pteval_t)(1)) << 5)|(((pteval_t)(1)) << 63)|(((pteval_t)(1)) << 6)| 0|(((pteval_t)(1)) << 8)) | 0) & __default_kernel_pte_mask) } ); +} +# 30 "./arch/x86/include/asm/fixmap.h" 2 + + + + + + +# 1 "./arch/x86/include/uapi/asm/vsyscall.h" 1 + + + + +enum vsyscall_num { + __NR_vgettimeofday, + __NR_vtime, + __NR_vgetcpu, +}; +# 37 "./arch/x86/include/asm/fixmap.h" 2 +# 75 "./arch/x86/include/asm/fixmap.h" +enum fixed_addresses { + + + + + VSYSCALL_PAGE = (((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) - (-10UL << 20)) >> 12, + + + FIX_DBGP_BASE, + FIX_EARLYCON_MEM_BASE, + + FIX_OHCI1394_BASE, + + + FIX_APIC_BASE, + + + 
FIX_IO_APIC_BASE_0, + FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + 128 - 1, +# 103 "./arch/x86/include/asm/fixmap.h" + FIX_PARAVIRT_BOOTMAP, + + + FIX_LNW_VRTC, + + + + + FIX_APEI_GHES_IRQ, + FIX_APEI_GHES_NMI, + + + __end_of_permanent_fixed_addresses, +# 127 "./arch/x86/include/asm/fixmap.h" + FIX_BTMAP_END = + (__end_of_permanent_fixed_addresses ^ + (__end_of_permanent_fixed_addresses + (64 * 8) - 1)) & + -512 + ? __end_of_permanent_fixed_addresses + (64 * 8) - + (__end_of_permanent_fixed_addresses & ((64 * 8) - 1)) + : __end_of_permanent_fixed_addresses, + FIX_BTMAP_BEGIN = FIX_BTMAP_END + (64 * 8) - 1, + + + + + FIX_TBOOT_BASE, + + __end_of_fixed_addresses +}; + + +extern void reserve_top_address(unsigned long reserve); + + + + + + +extern int fixmaps_set; + +extern pte_t *kmap_pte; +extern pte_t *pkmap_page_table; + +void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); +void native_set_fixmap(unsigned idx, + phys_addr_t phys, pgprot_t flags); +# 180 "./arch/x86/include/asm/fixmap.h" +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) *early_memremap_encrypted(resource_size_t phys_addr, + unsigned long size); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) *early_memremap_encrypted_wp(resource_size_t phys_addr, + unsigned long size); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) *early_memremap_decrypted(resource_size_t phys_addr, + unsigned long size); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) *early_memremap_decrypted_wp(resource_size_t phys_addr, + unsigned long size); + +# 1 "./include/asm-generic/fixmap.h" 1 +# 30 "./include/asm-generic/fixmap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long fix_to_virt(const unsigned int idx) +{ + do { extern void __compiletime_assert_796(void) __attribute__((__error__("BUILD_BUG_ON failed: " "idx >= __end_of_fixed_addresses"))); if (!(!(idx >= __end_of_fixed_addresses))) __compiletime_assert_796(); } while (0); + return (((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) - ((idx) << 12)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long virt_to_fix(const unsigned long vaddr) +{ + do { if (__builtin_expect(!!(vaddr >= ((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) || vaddr < (((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) - (__end_of_permanent_fixed_addresses << 12))), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (797)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/asm-generic/fixmap.h"), "i" (38), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" 
".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (798)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + return ((((((((-10UL << 20) + ((1UL) << 12))-1) | ((__typeof__((-10UL << 20) + ((1UL) << 12)))((1<<21)-1)))+1) - ((1UL) << 12)) - ((vaddr)&(~(((1UL) << 12)-1)))) >> 12); +} +# 190 "./arch/x86/include/asm/fixmap.h" 2 + + + + +void __early_set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); +# 12 "./arch/x86/include/asm/apic.h" 2 + + +# 1 "./arch/x86/include/asm/hardirq.h" 1 + + + + + + +typedef struct { + u16 __softirq_pending; + + u8 kvm_cpu_l1tf_flush_l1d; + + unsigned int __nmi_count; + + unsigned int apic_timer_irqs; + unsigned int irq_spurious_count; + unsigned int icr_read_retry_count; + + + unsigned int kvm_posted_intr_ipis; + unsigned int kvm_posted_intr_wakeup_ipis; + unsigned int kvm_posted_intr_nested_ipis; + + unsigned int x86_platform_ipis; + unsigned int apic_perf_irqs; + unsigned int apic_irq_work_irqs; + + unsigned int irq_resched_count; + unsigned int irq_call_count; + + unsigned int irq_tlb_count; + + unsigned int irq_thermal_count; + + + unsigned int irq_threshold_count; + + + unsigned int irq_deferred_error_count; + + + unsigned int irq_hv_callback_count; + + + unsigned int irq_hv_reenlightenment_count; + unsigned int hyperv_stimer0_count; + +} __attribute__((__aligned__((1 << (6))))) irq_cpustat_t; + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_irq_stat; extern __attribute__((section(".data..percpu" "..shared_aligned"))) __typeof__(irq_cpustat_t) irq_stat __attribute__((__aligned__((1 << (6))))); + + + + + +extern void ack_bad_irq(unsigned int irq); + +extern u64 arch_irq_stat_cpu(unsigned int cpu); + + +extern u64 arch_irq_stat(void); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kvm_set_cpu_l1tf_flush_l1d(void) +{ + ({ __this_cpu_preempt_check("write"); do { do { const void *__vpp_verify = (typeof((&(irq_stat.kvm_cpu_l1tf_flush_l1d)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(irq_stat.kvm_cpu_l1tf_flush_l1d)) { case 1: do { typedef typeof((irq_stat.kvm_cpu_l1tf_flush_l1d)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (1); (void)pto_tmp__; } switch (sizeof((irq_stat.kvm_cpu_l1tf_flush_l1d))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "qi" ((pto_T__)(1))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(1))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(1))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "re" ((pto_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((irq_stat.kvm_cpu_l1tf_flush_l1d)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (1); (void)pto_tmp__; } switch (sizeof((irq_stat.kvm_cpu_l1tf_flush_l1d))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "qi" ((pto_T__)(1))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(1))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(1))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" 
((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "re" ((pto_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((irq_stat.kvm_cpu_l1tf_flush_l1d)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (1); (void)pto_tmp__; } switch (sizeof((irq_stat.kvm_cpu_l1tf_flush_l1d))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "qi" ((pto_T__)(1))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(1))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(1))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "re" ((pto_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((irq_stat.kvm_cpu_l1tf_flush_l1d)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (1); (void)pto_tmp__; } switch (sizeof((irq_stat.kvm_cpu_l1tf_flush_l1d))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "qi" ((pto_T__)(1))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(1))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(1))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "re" ((pto_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kvm_clear_cpu_l1tf_flush_l1d(void) +{ + ({ __this_cpu_preempt_check("write"); do { do { const void *__vpp_verify = (typeof((&(irq_stat.kvm_cpu_l1tf_flush_l1d)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(irq_stat.kvm_cpu_l1tf_flush_l1d)) { case 1: do { typedef typeof((irq_stat.kvm_cpu_l1tf_flush_l1d)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (0); (void)pto_tmp__; } switch (sizeof((irq_stat.kvm_cpu_l1tf_flush_l1d))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "qi" ((pto_T__)(0))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(0))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(0))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "re" ((pto_T__)(0))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((irq_stat.kvm_cpu_l1tf_flush_l1d)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (0); (void)pto_tmp__; } switch (sizeof((irq_stat.kvm_cpu_l1tf_flush_l1d))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "qi" ((pto_T__)(0))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(0))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(0))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "re" ((pto_T__)(0))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((irq_stat.kvm_cpu_l1tf_flush_l1d)) pto_T__; if (0) { pto_T__ pto_tmp__; 
pto_tmp__ = (0); (void)pto_tmp__; } switch (sizeof((irq_stat.kvm_cpu_l1tf_flush_l1d))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "qi" ((pto_T__)(0))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(0))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(0))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "re" ((pto_T__)(0))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((irq_stat.kvm_cpu_l1tf_flush_l1d)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (0); (void)pto_tmp__; } switch (sizeof((irq_stat.kvm_cpu_l1tf_flush_l1d))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "qi" ((pto_T__)(0))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(0))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "ri" ((pto_T__)(0))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((irq_stat.kvm_cpu_l1tf_flush_l1d)) : "re" ((pto_T__)(0))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool kvm_get_cpu_l1tf_flush_l1d(void) +{ + return ({ __this_cpu_preempt_check("read"); ({ typeof(irq_stat.kvm_cpu_l1tf_flush_l1d) pscr_ret__; do { const void *__vpp_verify = (typeof((&(irq_stat.kvm_cpu_l1tf_flush_l1d)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(irq_stat.kvm_cpu_l1tf_flush_l1d)) { case 1: pscr_ret__ = ({ typeof(irq_stat.kvm_cpu_l1tf_flush_l1d) pfo_ret__; switch (sizeof(irq_stat.kvm_cpu_l1tf_flush_l1d)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(irq_stat.kvm_cpu_l1tf_flush_l1d) pfo_ret__; switch (sizeof(irq_stat.kvm_cpu_l1tf_flush_l1d)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(irq_stat.kvm_cpu_l1tf_flush_l1d) pfo_ret__; switch (sizeof(irq_stat.kvm_cpu_l1tf_flush_l1d)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" 
(pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(irq_stat.kvm_cpu_l1tf_flush_l1d) pfo_ret__; switch (sizeof(irq_stat.kvm_cpu_l1tf_flush_l1d)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_stat.kvm_cpu_l1tf_flush_l1d)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); }); +} +# 15 "./arch/x86/include/asm/apic.h" 2 +# 45 "./arch/x86/include/asm/apic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void generic_apic_probe(void) +{ +} + + + + +extern int apic_verbosity; +extern int local_apic_timer_c2_ok; + +extern int disable_apic; +extern unsigned int lapic_timer_period; + +extern enum apic_intr_mode_id apic_intr_mode; +enum apic_intr_mode_id { + APIC_PIC, + APIC_VIRTUAL_WIRE, + APIC_VIRTUAL_WIRE_NO_CONFIG, + APIC_SYMMETRIC_IO, + APIC_SYMMETRIC_IO_NO_ROUTING +}; + + +extern void __inquire_remote_apic(int apicid); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void default_inquire_remote_apic(int apicid) +{ + if (apic_verbosity >= 2) + __inquire_remote_apic(apicid); +} +# 89 "./arch/x86/include/asm/apic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool apic_from_smp_config(void) +{ + return smp_found_config && !disable_apic; +} +# 101 "./arch/x86/include/asm/apic.h" +extern int setup_profiling_timer(unsigned int); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_apic_mem_write(u32 reg, u32 v) +{ + volatile u32 *addr = (volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg); + + asm volatile ("# ALT: oldnstr\n" "661:\n\t" "movl %0, %P1" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "(19*32 + (5))" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "xchgl %0, %P1" "\n" "665""1" ":\n" ".popsection\n" : "=r" (v), "=m" (*addr) : "i" (0), "0" (v), "m" (*addr)) + + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 native_apic_mem_read(u32 reg) +{ + return *((volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg)); +} + +extern void native_apic_wait_icr_idle(void); +extern u32 native_safe_apic_wait_icr_idle(void); +extern void native_apic_icr_write(u32 low, u32 id); +extern u64 native_apic_icr_read(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) bool apic_is_x2apic_enabled(void) +{ + u64 msr; + + if (rdmsrl_safe(0x0000001b, &msr)) + return false; + return msr & (1UL << 10); +} + +extern void enable_IR_x2apic(void); + +extern int get_physical_broadcast(void); + +extern int lapic_get_maxlvt(void); +extern void clear_local_APIC(void); +extern void disconnect_bsp_APIC(int virt_wire_setup); +extern void disable_local_APIC(void); +extern void apic_soft_disable(void); +extern void lapic_shutdown(void); +extern void sync_Arb_IDs(void); +extern void init_bsp_APIC(void); +extern void apic_intr_mode_select(void); +extern void apic_intr_mode_init(void); +extern void init_apic_mappings(void); +void register_lapic_address(unsigned long address); +extern void setup_boot_APIC_clock(void); +extern void setup_secondary_APIC_clock(void); +extern void lapic_update_tsc_freq(void); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int apic_force_enable(unsigned long addr) +{ + return -1; +} + + + + +extern void apic_ap_setup(void); + + + + + +extern int apic_is_clustered_box(void); + + + + + + + +extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); +extern void lapic_assign_system_vectors(void); +extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace); +extern void lapic_online(void); +extern void lapic_offline(void); +extern bool apic_needs_pit(void); + +extern void apic_send_IPI_allbutself(unsigned int vector); +# 205 "./arch/x86/include/asm/apic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void x2apic_wrmsr_fence(void) +{ + asm volatile("mfence" : : : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_apic_msr_write(u32 reg, u32 v) +{ + if (reg == 0xE0 || reg == 0x20 || reg == 0xD0 || + reg == 0x30) + return; + + do { paravirt_write_msr(0x800 + (reg >> 4), v, 0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_apic_msr_eoi_write(u32 reg, u32 v) +{ + __wrmsr(0x800 + (0xB0 >> 4), 0x0, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 native_apic_msr_read(u32 reg) +{ + u64 msr; + + if (reg == 0xE0) + return -1; + + do { msr = paravirt_read_msr(0x800 + (reg >> 4)); } while (0); + return (u32)msr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_x2apic_wait_icr_idle(void) +{ + + return; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 native_safe_x2apic_wait_icr_idle(void) +{ + + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_x2apic_icr_write(u32 low, u32 id) +{ + wrmsrl(0x800 + (0x300 >> 4), ((__u64) id) << 32 | low); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 native_x2apic_icr_read(void) +{ + unsigned long val; + + do { val = paravirt_read_msr(0x800 + (0x300 >> 4)); } while (0); + return val; +} + +extern int x2apic_mode; +extern int x2apic_phys; +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) 
__attribute__((__indirect_branch__("keep"))) check_x2apic(void); +extern void x2apic_setup(void); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int x2apic_enabled(void) +{ + return (__builtin_constant_p(( 4*32+21)) && ( (((( 4*32+21))>>5)==(0) && (1UL<<((( 4*32+21))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+21))>>5)==(1) && (1UL<<((( 4*32+21))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+21))>>5)==(2) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(3) && (1UL<<((( 4*32+21))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+21))>>5)==(4) && (1UL<<((( 4*32+21))&31) & (0) )) || (((( 4*32+21))>>5)==(5) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(6) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(7) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(8) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(9) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(10) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(11) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(12) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(13) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(14) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(15) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(16) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(17) && (1UL<<((( 4*32+21))&31) & 0 )) || (((( 4*32+21))>>5)==(18) && (1UL<<((( 4*32+21))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit(( 4*32+21), (unsigned long *)((&boot_cpu_data)->x86_capability))) && apic_is_x2apic_enabled(); +} +# 279 "./arch/x86/include/asm/apic.h" +struct irq_data; +# 290 "./arch/x86/include/asm/apic.h" +struct apic { + + void (*eoi_write)(u32 reg, u32 v); + void (*native_eoi_write)(u32 reg, u32 v); + void (*write)(u32 reg, u32 v); + u32 (*read)(u32 reg); + + + void (*wait_icr_idle)(void); + u32 (*safe_wait_icr_idle)(void); + + void (*send_IPI)(int cpu, int vector); + void (*send_IPI_mask)(const struct cpumask *mask, int vector); + void (*send_IPI_mask_allbutself)(const struct cpumask *msk, int vec); + void (*send_IPI_allbutself)(int vector); + void (*send_IPI_all)(int vector); + void (*send_IPI_self)(int vector); + + + u32 dest_logical; + u32 disable_esr; + u32 irq_delivery_mode; + u32 irq_dest_mode; + + u32 (*calc_dest_apicid)(unsigned int cpu); + + + u64 (*icr_read)(void); + void (*icr_write)(u32 low, u32 high); + + + int (*probe)(void); + int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); + int (*apic_id_valid)(u32 apicid); + int (*apic_id_registered)(void); + + bool (*check_apicid_used)(physid_mask_t *map, int apicid); + void (*init_apic_ldr)(void); + void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); + void (*setup_apic_routing)(void); + int (*cpu_present_to_apicid)(int mps_cpu); + void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); + int (*check_phys_apicid_present)(int phys_apicid); + int (*phys_pkg_id)(int cpuid_apic, int index_msb); + + u32 (*get_apic_id)(unsigned long x); + u32 (*set_apic_id)(unsigned int id); + + + int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); + + void (*inquire_remote_apic)(int apicid); +# 356 "./arch/x86/include/asm/apic.h" + char *name; +}; + + + + + + +extern struct apic *apic; +# 384 "./arch/x86/include/asm/apic.h" +extern struct apic *__apicdrivers[], *__apicdrivers_end[]; + + + + + +extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); +extern int lapic_can_unplug_cpu(void); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 apic_read(u32 reg) +{ + return apic->read(reg); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void apic_write(u32 reg, u32 val) +{ + apic->write(reg, val); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void apic_eoi(void) +{ + apic->eoi_write(0xB0, 0x0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 apic_icr_read(void) +{ + return apic->icr_read(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void apic_icr_write(u32 low, u32 high) +{ + apic->icr_write(low, high); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void apic_wait_icr_idle(void) +{ + apic->wait_icr_idle(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 safe_apic_wait_icr_idle(void) +{ + return apic->safe_wait_icr_idle(); +} + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)); +# 446 "./arch/x86/include/asm/apic.h" +extern void 
apic_ack_irq(struct irq_data *data); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ack_APIC_irq(void) +{ + + + + + apic_eoi(); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool lapic_vector_set_in_irr(unsigned int vector) +{ + u32 irr = apic_read(0x200 + (vector / 32 * 0x10)); + + return !!(irr & (1U << (vector % 32))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned default_get_apic_id(unsigned long x) +{ + unsigned int ver = ((apic_read(0x30)) & 0xFFu); + + if (((ver) >= 0x14) || (__builtin_constant_p(( 3*32+26)) && ( (((( 3*32+26))>>5)==(0) && (1UL<<((( 3*32+26))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 3*32+26))>>5)==(1) && (1UL<<((( 3*32+26))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 3*32+26))>>5)==(2) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(3) && (1UL<<((( 3*32+26))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 3*32+26))>>5)==(4) && (1UL<<((( 3*32+26))&31) & (0) )) || (((( 3*32+26))>>5)==(5) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(6) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(7) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(8) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(9) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(10) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(11) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(12) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(13) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(14) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(15) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(16) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(17) && (1UL<<((( 3*32+26))&31) & 0 )) || (((( 3*32+26))>>5)==(18) && (1UL<<((( 3*32+26))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit(( 3*32+26), (unsigned long *)((&boot_cpu_data)->x86_capability)))) + return (x >> 24) & 0xFF; + else + return (x >> 24) & 0x0F; +} + + + + + + + +extern void generic_bigsmp_probe(void); + + + +# 1 "./arch/x86/include/asm/smp.h" 1 +# 486 "./arch/x86/include/asm/apic.h" 2 + + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_x86_bios_cpu_apicid; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[]; + +extern struct apic apic_noop; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int read_apic_id(void) +{ + unsigned int reg = apic_read(0x20); + + return apic->get_apic_id(reg); +} + +extern int default_apic_id_valid(u32 apicid); +extern int default_acpi_madt_oem_check(char *, char *); +extern void default_setup_apic_routing(void); + +extern u32 apic_default_calc_apicid(unsigned int cpu); +extern u32 apic_flat_calc_apicid(unsigned int cpu); + +extern bool default_check_apicid_used(physid_mask_t *map, int apicid); +extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap); +extern int default_cpu_present_to_apicid(int mps_cpu); +extern int default_check_phys_apicid_present(int phys_apicid); + + + + +bool apic_id_is_primary_thread(unsigned int id); +void apic_smt_update(void); + + + + + +extern void ioapic_zap_locks(void); +# 14 "./arch/x86/include/asm/smp.h" 2 + +# 1 "./arch/x86/include/asm/io_apic.h" 1 + + + + + + + +# 1 "./arch/x86/include/asm/irq_vectors.h" 1 +# 9 "./arch/x86/include/asm/io_apic.h" 2 +# 28 "./arch/x86/include/asm/io_apic.h" +union IO_APIC_reg_00 { + u32 raw; + struct { + u32 __reserved_2 : 14, + LTS : 1, + delivery_type : 1, + __reserved_1 : 8, + ID : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_01 { + u32 raw; + struct { + u32 version : 8, + __reserved_2 : 7, + PRQ : 1, + entries : 8, + __reserved_1 : 8; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_02 { + u32 raw; + struct { + u32 __reserved_2 : 24, + arbitration : 4, + __reserved_1 : 4; + } __attribute__ ((packed)) bits; +}; + +union IO_APIC_reg_03 { + u32 raw; + struct { + u32 boot_DT : 1, + __reserved_1 : 31; + } __attribute__ ((packed)) bits; +}; + +struct IO_APIC_route_entry { + __u32 vector : 8, + delivery_mode : 3, + + + + dest_mode : 1, + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, + mask : 1, + __reserved_2 : 15; + + __u32 __reserved_3 : 24, + dest : 8; +} __attribute__ ((packed)); + +struct IR_IO_APIC_route_entry { + __u64 vector : 8, + zero : 3, + index2 : 1, + delivery_status : 1, + polarity : 1, + irr : 1, + trigger : 1, + mask : 1, + reserved : 31, + format : 1, + index : 15; +} __attribute__ ((packed)); + +struct irq_alloc_info; +struct ioapic_domain_cfg; +# 123 "./arch/x86/include/asm/io_apic.h" +extern int nr_ioapics; + +extern int mpc_ioapic_id(int ioapic); +extern unsigned int mpc_ioapic_addr(int ioapic); + + +extern int mp_irq_entries; + + +extern struct mpc_intsrc mp_irqs[(256 * 4)]; + + +extern int skip_ioapic_setup; + + +extern int noioapicquirk; + + +extern int noioapicreroute; + +extern u32 gsi_top; + +extern unsigned long io_apic_irqs; +# 156 "./arch/x86/include/asm/io_apic.h" +struct irq_cfg; +extern void ioapic_insert_resources(void); +extern int arch_early_ioapic_init(void); + +extern int save_ioapic_entries(void); +extern void 
mask_ioapic_entries(void); +extern int restore_ioapic_entries(void); + +extern void setup_ioapic_ids_from_mpc(void); +extern void setup_ioapic_ids_from_mpc_nocheck(void); + +extern int mp_find_ioapic(u32 gsi); +extern int mp_find_ioapic_pin(int ioapic, u32 gsi); +extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags, + struct irq_alloc_info *info); +extern void mp_unmap_irq(int irq); +extern int mp_register_ioapic(int id, u32 address, u32 gsi_base, + struct ioapic_domain_cfg *cfg); +extern int mp_unregister_ioapic(u32 gsi_base); +extern int mp_ioapic_registered(u32 gsi_base); + +extern void ioapic_set_alloc_attr(struct irq_alloc_info *info, + int node, int trigger, int polarity); + +extern void mp_save_irq(struct mpc_intsrc *m); + +extern void disable_ioapic_support(void); + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) io_apic_init_mappings(void); +extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); +extern void native_restore_boot_irq_mode(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int io_apic_read(unsigned int apic, unsigned int reg) +{ + return x86_apic_ops.io_apic_read(apic, reg); +} + +extern void setup_IO_APIC(void); +extern void enable_IO_APIC(void); +extern void clear_IO_APIC(void); +extern void restore_boot_irq_mode(void); +extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin); +extern void print_IO_APICs(void); +# 16 "./arch/x86/include/asm/smp.h" 2 + + + + + +extern int smp_num_siblings; +extern unsigned int num_processors; + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_sibling_map; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(cpumask_var_t) cpu_sibling_map; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_core_map; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(cpumask_var_t) cpu_core_map; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_die_map; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(cpumask_var_t) cpu_die_map; + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_llc_shared_map; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(cpumask_var_t) cpu_llc_shared_map; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_llc_id; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u16) cpu_llc_id; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_number; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(int) cpu_number; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct cpumask *cpu_llc_shared_mask(int cpu) +{ + return (*({ do { const void *__vpp_verify = (typeof((&(cpu_llc_shared_map)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(cpu_llc_shared_map)))) *)((&(cpu_llc_shared_map))))); (typeof((typeof(*((&(cpu_llc_shared_map)))) *)((&(cpu_llc_shared_map))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); +} + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_x86_cpu_to_apicid; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u16) x86_cpu_to_apicid; extern 
__typeof__(u16) *x86_cpu_to_apicid_early_ptr; extern __typeof__(u16) x86_cpu_to_apicid_early_map[]; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_x86_cpu_to_acpiid; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u32) x86_cpu_to_acpiid; extern __typeof__(u32) *x86_cpu_to_acpiid_early_ptr; extern __typeof__(u32) x86_cpu_to_acpiid_early_map[]; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_x86_bios_cpu_apicid; extern __attribute__((section(".data..percpu" "..read_mostly"))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[]; + + + + +struct task_struct; + +struct smp_ops { + void (*smp_prepare_boot_cpu)(void); + void (*smp_prepare_cpus)(unsigned max_cpus); + void (*smp_cpus_done)(unsigned max_cpus); + + void (*stop_other_cpus)(int wait); + void (*crash_stop_other_cpus)(void); + void (*smp_send_reschedule)(int cpu); + + int (*cpu_up)(unsigned cpu, struct task_struct *tidle); + int (*cpu_disable)(void); + void (*cpu_die)(unsigned int cpu); + void (*play_dead)(void); + + void (*send_call_func_ipi)(const struct cpumask *mask); + void (*send_call_func_single_ipi)(int cpu); +}; + + +extern void set_cpu_sibling_map(int cpu); + + +extern struct smp_ops smp_ops; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_send_stop(void) +{ + smp_ops.stop_other_cpus(0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void stop_other_cpus(void) +{ + smp_ops.stop_other_cpus(1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_prepare_boot_cpu(void) +{ + smp_ops.smp_prepare_boot_cpu(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_prepare_cpus(unsigned int max_cpus) +{ + smp_ops.smp_prepare_cpus(max_cpus); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_cpus_done(unsigned int max_cpus) +{ + smp_ops.smp_cpus_done(max_cpus); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + return smp_ops.cpu_up(cpu, tidle); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __cpu_disable(void) +{ + return smp_ops.cpu_disable(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __cpu_die(unsigned int cpu) +{ + smp_ops.cpu_die(cpu); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void play_dead(void) +{ + smp_ops.play_dead(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void smp_send_reschedule(int cpu) +{ + smp_ops.smp_send_reschedule(cpu); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_send_call_function_single_ipi(int cpu) +{ + smp_ops.send_call_func_single_ipi(cpu); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + smp_ops.send_call_func_ipi(mask); +} + +void cpu_disable_common(void); +void native_smp_prepare_boot_cpu(void); +void native_smp_prepare_cpus(unsigned int max_cpus); +void calculate_max_logical_packages(void); +void native_smp_cpus_done(unsigned int max_cpus); +int common_cpu_up(unsigned int cpunum, struct task_struct *tidle); +int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); +int native_cpu_disable(void); +int common_cpu_die(unsigned int cpu); +void native_cpu_die(unsigned int cpu); +void hlt_play_dead(void); +void native_play_dead(void); +void play_dead_common(void); +void wbinvd_on_cpu(int cpu); +int wbinvd_on_all_cpus(void); + +void native_smp_send_reschedule(int cpu); +void native_send_call_func_ipi(const struct cpumask *mask); +void native_send_call_func_single_ipi(int cpu); +void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle); + +void smp_store_boot_cpu_info(void); +void smp_store_cpu_info(int id); + + __attribute__((__externally_visible__)) void smp_reboot_interrupt(void); +__attribute__((__externally_visible__)) void smp_reschedule_interrupt(struct pt_regs *regs); +__attribute__((__externally_visible__)) void smp_call_function_interrupt(struct pt_regs *regs); +__attribute__((__externally_visible__)) void smp_call_function_single_interrupt(struct pt_regs *r); +# 185 "./arch/x86/include/asm/smp.h" +extern unsigned disabled_cpus; + + +extern int hard_smp_processor_id(void); + + + + + + +extern void nmi_selftest(void); +# 12 "./arch/x86/include/asm/mmzone_64.h" 2 + +extern struct pglist_data *node_data[]; +# 6 "./arch/x86/include/asm/mmzone.h" 2 +# 950 "./include/linux/mmzone.h" 2 + + + +extern struct pglist_data *first_online_pgdat(void); +extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); +extern struct zone *next_zone(struct zone *zone); +# 985 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct zone *zonelist_zone(struct zoneref *zoneref) +{ + return zoneref->zone; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int zonelist_zone_idx(struct zoneref *zoneref) +{ + return zoneref->zone_idx; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int zonelist_node_idx(struct zoneref *zoneref) +{ + return zone_to_nid(zoneref->zone); +} + +struct zoneref *__next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes); +# 1016 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) struct zoneref *next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes) +{ + if (__builtin_expect(!!(!nodes && zonelist_zone_idx(z) <= highest_zoneidx), 1)) + return z; + return __next_zones_zonelist(z, highest_zoneidx, nodes); +} +# 1041 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist, + enum zone_type highest_zoneidx, + nodemask_t *nodes) +{ + return next_zones_zonelist(zonelist->_zonerefs, + highest_zoneidx, nodes); +} +# 1116 "./include/linux/mmzone.h" +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pfn_to_section_nr(unsigned long pfn) +{ + return pfn >> (27 - 12); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long section_nr_to_pfn(unsigned long sec) +{ + return sec << (27 - 12); +} +# 1144 "./include/linux/mmzone.h" +struct mem_section_usage { + + unsigned long subsection_map[((((1UL << (27 - 21))) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; + + + unsigned long pageblock_flags[0]; +}; + +void subsection_map_init(unsigned long pfn, unsigned long nr_pages); + +struct page; +struct page_ext; +struct mem_section { +# 1169 "./include/linux/mmzone.h" + unsigned long section_mem_map; + + struct mem_section_usage *usage; + + + + + + struct page_ext *page_ext; + unsigned long pad; + + + + + +}; +# 1197 "./include/linux/mmzone.h" +extern struct mem_section **mem_section; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long *section_to_usemap(struct mem_section *ms) +{ + return ms->usage->pageblock_flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct mem_section *__nr_to_section(unsigned long nr) +{ + + if (!mem_section) + return ((void *)0); + + if (!mem_section[((nr) / (((1UL) << 12) / sizeof (struct mem_section)))]) + return ((void *)0); + return &mem_section[((nr) / (((1UL) << 12) / sizeof (struct mem_section)))][nr & ((((1UL) << 12) / sizeof (struct mem_section)) - 1)]; +} +extern unsigned long __section_nr(struct mem_section *ms); +extern size_t mem_section_usage_size(void); +# 1241 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *__section_mem_map_addr(struct mem_section *section) +{ + unsigned long map = section->section_mem_map; + map &= (~((1UL<<4)-1)); + return (struct page *)map; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int present_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & (1UL<<0))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int present_section_nr(unsigned long nr) +{ + return present_section(__nr_to_section(nr)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int valid_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & (1UL<<1))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int early_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & (1UL<<3))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int valid_section_nr(unsigned long nr) +{ + return valid_section(__nr_to_section(nr)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int online_section(struct mem_section *section) +{ + return (section && (section->section_mem_map & (1UL<<2))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) int online_section_nr(unsigned long nr) +{ + return online_section(__nr_to_section(nr)); +} + + +void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); + +void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct mem_section *__pfn_to_section(unsigned long pfn) +{ + return __nr_to_section(pfn_to_section_nr(pfn)); +} + +extern unsigned long __highest_present_section_nr; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int subsection_map_index(unsigned long pfn) +{ + return (pfn & ~((~((1UL << (27 - 12))-1)))) / (1UL << (21 - 12)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pfn_section_valid(struct mem_section *ms, unsigned long pfn) +{ + int idx = subsection_map_index(pfn); + + return test_bit(idx, ms->usage->subsection_map); +} +# 1317 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pfn_valid(unsigned long pfn) +{ + struct mem_section *ms; + + if (pfn_to_section_nr(pfn) >= (1UL << (((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) )) ? 
52 : 46) - 27))) + return 0; + ms = __nr_to_section(pfn_to_section_nr(pfn)); + if (!valid_section(ms)) + return 0; + + + + + return early_section(ms) || pfn_section_valid(ms, pfn); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pfn_in_present_section(unsigned long pfn) +{ + if (pfn_to_section_nr(pfn) >= (1UL << (((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? 
(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) )) ? 52 : 46) - 27))) + return 0; + return present_section(__nr_to_section(pfn_to_section_nr(pfn))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long next_present_section_nr(unsigned long section_nr) +{ + while (++section_nr <= __highest_present_section_nr) { + if (present_section_nr(section_nr)) + return section_nr; + } + + return -1; +} +# 1367 "./include/linux/mmzone.h" +void sparse_init(void); +# 1380 "./include/linux/mmzone.h" +struct mminit_pfnnid_cache { + unsigned long last_start; + unsigned long last_end; + int last_nid; +}; + + + + + +void memory_present(int nid, unsigned long start, unsigned long end); +# 1428 "./include/linux/mmzone.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone) +{ + return true; +} +# 7 "./include/linux/gfp.h" 2 + + +# 1 "./include/linux/topology.h" 1 +# 30 "./include/linux/topology.h" +# 1 "./include/linux/arch_topology.h" 1 +# 9 "./include/linux/arch_topology.h" +# 1 "./include/linux/percpu.h" 1 + + + + + + +# 1 "./include/linux/smp.h" 1 +# 17 "./include/linux/smp.h" +typedef void (*smp_call_func_t)(void *info); +typedef bool (*smp_cond_func_t)(int cpu, void *info); + +enum { + CSD_FLAG_LOCK = 0x01, + + + + CSD_TYPE_ASYNC = 0x00, + CSD_TYPE_SYNC = 0x10, + CSD_TYPE_IRQ_WORK = 0x20, + CSD_TYPE_TTWU = 0x30, + CSD_FLAG_TYPE_MASK = 0xF0, +}; + + + + +struct __call_single_data { + struct llist_node llist; + unsigned int flags; + smp_call_func_t func; + void *info; +}; + + +typedef struct __call_single_data call_single_data_t + __attribute__((__aligned__(sizeof(struct __call_single_data)))); + + + + + +extern void __smp_call_single_queue(int cpu, struct llist_node *node); + + +extern unsigned int 
total_cpus; + +int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, + int wait); + + + + +void on_each_cpu(smp_call_func_t func, void *info, int wait); + + + + + +void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, + void *info, bool wait); + + + + + + +void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func, + void *info, bool wait); + +void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, + void *info, bool wait, const struct cpumask *mask); + +int smp_call_function_single_async(int cpu, call_single_data_t *csd); +# 99 "./include/linux/smp.h" +extern void smp_send_stop(void); + + + + +extern void smp_send_reschedule(int cpu); + + + + + +extern void smp_prepare_cpus(unsigned int max_cpus); + + + + +extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle); + + + + +extern void smp_cpus_done(unsigned int max_cpus); + + + + +void smp_call_function(smp_call_func_t func, void *info, int wait); +void smp_call_function_many(const struct cpumask *mask, + smp_call_func_t func, void *info, bool wait); + +int smp_call_function_any(const struct cpumask *mask, + smp_call_func_t func, void *info, int wait); + +void kick_all_cpus_sync(void); +void wake_up_all_idle_cpus(void); + + + + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) call_function_init(void); +void generic_smp_call_function_single_interrupt(void); + + + + + + + +void smp_prepare_boot_cpu(void); + +extern unsigned int setup_max_cpus; +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) setup_nr_cpu_ids(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) smp_init(void); + +extern int __boot_cpu_id; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_boot_cpu_id(void) +{ + return __boot_cpu_id; +} +# 250 "./include/linux/smp.h" +extern void arch_disable_smp_support(void); + +extern void arch_thaw_secondary_cpus_begin(void); +extern void arch_thaw_secondary_cpus_end(void); + +void smp_setup_processor_id(void); + +int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, + bool phys); + + +int smpcfd_prepare_cpu(unsigned int cpu); +int smpcfd_dead_cpu(unsigned int cpu); +int smpcfd_dying_cpu(unsigned int cpu); +# 8 "./include/linux/percpu.h" 2 +# 65 "./include/linux/percpu.h" +extern void *pcpu_base_addr; +extern const unsigned long *pcpu_unit_offsets; + +struct pcpu_group_info { + int nr_units; + unsigned long base_offset; + unsigned int *cpu_map; + +}; + +struct pcpu_alloc_info { + size_t static_size; + size_t reserved_size; + size_t dyn_size; + size_t unit_size; + size_t atom_size; + size_t alloc_size; + size_t __ai_size; + int nr_groups; + struct pcpu_group_info groups[]; +}; + +enum pcpu_fc { + PCPU_FC_AUTO, + PCPU_FC_EMBED, + PCPU_FC_PAGE, + + PCPU_FC_NR, +}; +extern const char * const pcpu_fc_names[PCPU_FC_NR]; + +extern enum pcpu_fc pcpu_chosen_fc; + +typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size, + size_t align); +typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size); +typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr); +typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to); + +extern struct pcpu_alloc_info * __attribute__((__section__(".init.text"))) __attribute__((__cold__)) 
__attribute__((__indirect_branch__("keep"))) pcpu_alloc_alloc_info(int nr_groups, + int nr_units); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pcpu_free_alloc_info(struct pcpu_alloc_info *ai); + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, + void *base_addr); + + +extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, + size_t atom_size, + pcpu_fc_cpu_distance_fn_t cpu_distance_fn, + pcpu_fc_alloc_fn_t alloc_fn, + pcpu_fc_free_fn_t free_fn); + + + +extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pcpu_page_first_chunk(size_t reserved_size, + pcpu_fc_alloc_fn_t alloc_fn, + pcpu_fc_free_fn_t free_fn, + pcpu_fc_populate_pte_fn_t populate_pte_fn); + + +extern void *__alloc_reserved_percpu(size_t size, size_t align); +extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); +extern bool is_kernel_percpu_address(unsigned long addr); + + + + + +extern void *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp); +extern void *__alloc_percpu(size_t size, size_t align); +extern void free_percpu(void *__pdata); +extern phys_addr_t per_cpu_ptr_to_phys(void *addr); +# 146 "./include/linux/percpu.h" +extern unsigned long pcpu_nr_pages(void); +# 10 "./include/linux/arch_topology.h" 2 + +void topology_normalize_cpu_scale(void); +int topology_update_cpu_topology(void); + +struct device_node; +bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_scale; extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) cpu_scale; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long topology_get_cpu_scale(int cpu) +{ + return (*({ do { const void *__vpp_verify = (typeof((&(cpu_scale)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(cpu_scale)))) *)((&(cpu_scale))))); (typeof((typeof(*((&(cpu_scale)))) *)((&(cpu_scale))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); +} + +void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_freq_scale; extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) freq_scale; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long topology_get_freq_scale(int cpu) +{ + return (*({ do { const void *__vpp_verify = (typeof((&(freq_scale)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(freq_scale)))) *)((&(freq_scale))))); (typeof((typeof(*((&(freq_scale)))) *)((&(freq_scale))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); +} + +bool arch_freq_counters_available(struct cpumask *cpus); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_thermal_pressure; extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) thermal_pressure; + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long topology_get_thermal_pressure(int cpu) +{ + return (*({ do { const void *__vpp_verify = (typeof((&(thermal_pressure)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(thermal_pressure)))) *)((&(thermal_pressure))))); (typeof((typeof(*((&(thermal_pressure)))) *)((&(thermal_pressure))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); +} + +void arch_set_thermal_pressure(struct cpumask *cpus, + unsigned long th_pressure); + +struct cpu_topology { + int thread_id; + int core_id; + int package_id; + int llc_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; + cpumask_t llc_sibling; +}; +# 31 "./include/linux/topology.h" 2 +# 46 "./include/linux/topology.h" +int arch_update_cpu_topology(void); +# 75 "./include/linux/topology.h" +extern int __attribute__((__section__(".data..read_mostly"))) node_reclaim_distance; + + + + + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_numa_node; extern __attribute__((section(".data..percpu" ""))) __typeof__(int) numa_node; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int numa_node_id(void) +{ + return ({ typeof(numa_node) pscr_ret__; do { const void *__vpp_verify = (typeof((&(numa_node)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(numa_node)) { case 1: pscr_ret__ = ({ typeof(numa_node) pfo_ret__; switch (sizeof(numa_node)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (numa_node)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(numa_node) pfo_ret__; switch (sizeof(numa_node)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (numa_node)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(numa_node) pfo_ret__; switch (sizeof(numa_node)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (numa_node)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(numa_node) pfo_ret__; switch (sizeof(numa_node)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (numa_node)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (numa_node)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: 
__bad_size_call_parameter(); break; } pscr_ret__; }); +} +# 100 "./include/linux/topology.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_numa_node(int node) +{ + do { do { const void *__vpp_verify = (typeof((&(numa_node)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(numa_node)) { case 1: do { typedef typeof((numa_node)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (node); (void)pto_tmp__; } switch (sizeof((numa_node))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "qi" ((pto_T__)(node))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "re" ((pto_T__)(node))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((numa_node)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (node); (void)pto_tmp__; } switch (sizeof((numa_node))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "qi" ((pto_T__)(node))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "re" ((pto_T__)(node))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((numa_node)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (node); (void)pto_tmp__; } switch (sizeof((numa_node))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "qi" ((pto_T__)(node))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "re" ((pto_T__)(node))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((numa_node)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (node); (void)pto_tmp__; } switch (sizeof((numa_node))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "qi" ((pto_T__)(node))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "ri" ((pto_T__)(node))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((numa_node)) : "re" ((pto_T__)(node))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_cpu_numa_node(int cpu, int node) +{ + (*({ do { const void *__vpp_verify = (typeof((&(numa_node)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(numa_node)))) *)((&(numa_node))))); (typeof((typeof(*((&(numa_node)))) *)((&(numa_node))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })) = node; +} +# 167 "./include/linux/topology.h" +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int numa_mem_id(void) +{ + return numa_node_id(); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpu_to_mem(int cpu) +{ + return __cpu_to_node(cpu); +} +# 202 "./include/linux/topology.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cpumask *cpu_smt_mask(int cpu) +{ + return ((*({ do { const void *__vpp_verify = (typeof((&(cpu_sibling_map)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(cpu_sibling_map)))) *)((&(cpu_sibling_map))))); (typeof((typeof(*((&(cpu_sibling_map)))) *)((&(cpu_sibling_map))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cpumask *cpu_cpu_mask(int cpu) +{ + return cpumask_of_node(__cpu_to_node(cpu)); +} +# 10 "./include/linux/gfp.h" 2 + +struct vm_area_struct; +# 315 "./include/linux/gfp.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int gfp_migratetype(const gfp_t gfp_flags) +{ + (void)({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x10u)|(( gfp_t)0x08u))) == ((( gfp_t)0x10u)|(( gfp_t)0x08u))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (799)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (317), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (800)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (801)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + do { extern void __compiletime_assert_802(void) __attribute__((__error__("BUILD_BUG_ON failed: " "(1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE"))); if (!(!((1UL << 3) != 0x08u))) __compiletime_assert_802(); } while (0); + do { extern void __compiletime_assert_803(void) __attribute__((__error__("BUILD_BUG_ON failed: " "(___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE"))); if (!(!((0x08u >> 3) != MIGRATE_MOVABLE))) __compiletime_assert_803(); } while (0); + + if (__builtin_expect(!!(page_group_by_mobility_disabled), 0)) + return MIGRATE_UNMOVABLE; + + + return (gfp_flags & ((( gfp_t)0x10u)|(( gfp_t)0x08u))) >> 3; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gfpflags_allow_blocking(const gfp_t gfp_flags) +{ + return !!(gfp_flags & (( gfp_t)0x400u)); +} +# 352 "./include/linux/gfp.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gfpflags_normal_context(const gfp_t gfp_flags) +{ + return (gfp_flags & ((( gfp_t)0x400u) | (( gfp_t)0x20000u))) == + (( gfp_t)0x400u); +} +# 448 
"./include/linux/gfp.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum zone_type gfp_zone(gfp_t flags) +{ + enum zone_type z; + int bit = ( int) (flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u))); + + z = (( (ZONE_NORMAL << 0 * 2) | (ZONE_DMA << 0x01u * 2) | (ZONE_NORMAL << 0x02u * 2) | (ZONE_DMA32 << 0x04u * 2) | (ZONE_NORMAL << 0x08u * 2) | (ZONE_DMA << (0x08u | 0x01u) * 2) | (ZONE_MOVABLE << (0x08u | 0x02u) * 2) | (ZONE_DMA32 << (0x08u | 0x04u) * 2)) >> (bit * 2)) & + ((1 << 2) - 1); + do { if (__builtin_expect(!!((( 1 << (0x01u | 0x02u) | 1 << (0x01u | 0x04u) | 1 << (0x04u | 0x02u) | 1 << (0x01u | 0x04u | 0x02u) | 1 << (0x08u | 0x02u | 0x01u) | 1 << (0x08u | 0x04u | 0x01u) | 1 << (0x08u | 0x04u | 0x02u) | 1 << (0x08u | 0x04u | 0x01u | 0x02u) ) >> bit) & 1), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (804)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (455), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (805)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + return z; +} +# 466 "./include/linux/gfp.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int gfp_zonelist(gfp_t flags) +{ + + if (__builtin_expect(!!(flags & (( gfp_t)0x200000u)), 0)) + return ZONELIST_NOFALLBACK; + + return ZONELIST_FALLBACK; +} +# 484 "./include/linux/gfp.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct zonelist *node_zonelist(int nid, gfp_t flags) +{ + return (node_data[nid])->node_zonelists + gfp_zonelist(flags); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_free_page(struct page *page, int order) { } + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_alloc_page(struct page *page, int order) { } + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int arch_make_page_accessible(struct page *page) +{ + return 0; +} + + +struct page * +__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, + nodemask_t *nodemask); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page * +__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) +{ + return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, ((void *)0)); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page * +__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) +{ + do { if (__builtin_expect(!!(nid < 0 || nid >= (1 << 10)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (806)); }); 
do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (519), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (807)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + (void)({ int __ret_warn_on = !!((gfp_mask & (( gfp_t)0x200000u)) && !node_state((nid), N_ONLINE)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (808)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/gfp.h"), "i" (520), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (809)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (810)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + + return __alloc_pages(gfp_mask, order, nid); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask, + unsigned int order) +{ + if (nid == (-1)) + nid = numa_mem_id(); + + return __alloc_pages_node(nid, gfp_mask, order); +} + + +extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page * +alloc_pages(gfp_t gfp_mask, unsigned int order) +{ + return alloc_pages_current(gfp_mask, order); +} +extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, + struct vm_area_struct *vma, unsigned long addr, + int node, bool hugepage); +# 566 "./include/linux/gfp.h" +extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); +extern unsigned long get_zeroed_page(gfp_t gfp_mask); + +void *alloc_pages_exact(size_t size, gfp_t gfp_mask); +void free_pages_exact(void *virt, size_t size); +void * __attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); + + + + + + + +extern void __free_pages(struct page *page, unsigned int order); +extern void free_pages(unsigned long addr, unsigned int order); +extern void free_unref_page(struct page *page); +extern void free_unref_page_list(struct list_head *list); + +struct page_frag_cache; +extern void __page_frag_cache_drain(struct page *page, unsigned int count); +extern void *page_frag_alloc(struct page_frag_cache *nc, + unsigned int fragsz, gfp_t gfp_mask); +extern void page_frag_free(void *addr); + + + + +void page_alloc_init(void); +void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); +void drain_all_pages(struct zone *zone); +void 
drain_local_pages(struct zone *zone); + +void page_alloc_init_late(void); +# 607 "./include/linux/gfp.h" +extern gfp_t gfp_allowed_mask; + + +bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); + +extern void pm_restrict_gfp_mask(void); +extern void pm_restore_gfp_mask(void); + + +extern bool pm_suspended_storage(void); +# 626 "./include/linux/gfp.h" +extern int alloc_contig_range(unsigned long start, unsigned long end, + unsigned migratetype, gfp_t gfp_mask); +extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, + int nid, nodemask_t *nodemask); + +void free_contig_range(unsigned long pfn, unsigned int nr_pages); + + + +extern void init_cma_reserved_pageblock(struct page *page); +# 15 "./include/linux/xarray.h" 2 +# 1 "./include/linux/kconfig.h" 1 +# 16 "./include/linux/xarray.h" 2 +# 52 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_mk_value(unsigned long v) +{ + ({ int __ret_warn_on = !!((long)v < 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (811)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/xarray.h"), "i" (54), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (812)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (813)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + return (void *)((v << 1) | 1); +} +# 65 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long xa_to_value(const void *entry) +{ + return (unsigned long)entry >> 1; +} +# 77 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool xa_is_value(const void *entry) +{ + return (unsigned long)entry & 1; +} +# 95 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_tag_pointer(void *p, unsigned long tag) +{ + return (void *)((unsigned long)p | tag); +} +# 110 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_untag_pointer(void *entry) +{ + return (void *)((unsigned long)entry & ~3UL); +} +# 125 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int xa_pointer_tag(void *entry) +{ + return (unsigned long)entry & 3UL; +} +# 143 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_mk_internal(unsigned long v) +{ + return (void *)((v << 2) | 2); +} +# 155 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long 
xa_to_internal(const void *entry) +{ + return (unsigned long)entry >> 2; +} +# 167 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool xa_is_internal(const void *entry) +{ + return ((unsigned long)entry & 3) == 2; +} +# 183 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool xa_is_zero(const void *entry) +{ + return __builtin_expect(!!(entry == xa_mk_internal(257)), 0); +} +# 199 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool xa_is_err(const void *entry) +{ + return __builtin_expect(!!(xa_is_internal(entry) && entry >= xa_mk_internal(-4095)), 0) + ; +} +# 217 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int xa_err(void *entry) +{ + + if (xa_is_err(entry)) + return (long)entry >> 2; + return 0; +} +# 236 "./include/linux/xarray.h" +struct xa_limit { + u32 max; + u32 min; +}; + + + + + + +typedef unsigned xa_mark_t; + + + + + + + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; +# 292 "./include/linux/xarray.h" +struct xarray { + spinlock_t xa_lock; + + gfp_t xa_flags; + void * xa_head; +}; +# 347 "./include/linux/xarray.h" +void *xa_load(struct xarray *, unsigned long index); +void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); +void *xa_erase(struct xarray *, unsigned long index); +void *xa_store_range(struct xarray *, unsigned long first, unsigned long last, + void *entry, gfp_t); +bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t); +void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); +void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); +void *xa_find(struct xarray *xa, unsigned long *index, + unsigned long max, xa_mark_t) __attribute__((nonnull(2))); +void *xa_find_after(struct xarray *xa, unsigned long *index, + unsigned long max, xa_mark_t) __attribute__((nonnull(2))); +unsigned int xa_extract(struct xarray *, void **dst, unsigned long start, + unsigned long max, unsigned int n, xa_mark_t); +void xa_destroy(struct xarray *); +# 374 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void xa_init_flags(struct xarray *xa, gfp_t flags) +{ + do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&xa->xa_lock), "&xa->xa_lock", &__key, LD_WAIT_CONFIG); } while (0); + xa->xa_flags = flags; + xa->xa_head = ((void *)0); +} +# 389 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void xa_init(struct xarray *xa) +{ + xa_init_flags(xa, 0); +} +# 401 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool xa_empty(const struct xarray *xa) +{ + return xa->xa_head == ((void *)0); +} +# 414 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool xa_marked(const struct xarray *xa, xa_mark_t mark) +{ + return xa->xa_flags & (( gfp_t)((1U << (23 + 1)) << ( unsigned)(mark))); +} +# 554 "./include/linux/xarray.h" +void *__xa_erase(struct xarray *, unsigned long 
index); +void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); +void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, + void *entry, gfp_t); +int __attribute__((__warn_unused_result__)) __xa_insert(struct xarray *, unsigned long index, + void *entry, gfp_t); +int __attribute__((__warn_unused_result__)) __xa_alloc(struct xarray *, u32 *id, void *entry, + struct xa_limit, gfp_t); +int __attribute__((__warn_unused_result__)) __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, + struct xa_limit, u32 *next, gfp_t); +void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); +void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); +# 581 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_store_bh(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr; + + spin_lock_bh(&(xa)->xa_lock); + curr = __xa_store(xa, index, entry, gfp); + spin_unlock_bh(&(xa)->xa_lock); + + return curr; +} +# 607 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_store_irq(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr; + + spin_lock_irq(&(xa)->xa_lock); + curr = __xa_store(xa, index, entry, gfp); + spin_unlock_irq(&(xa)->xa_lock); + + return curr; +} +# 632 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_erase_bh(struct xarray *xa, unsigned long index) +{ + void *entry; + + spin_lock_bh(&(xa)->xa_lock); + entry = __xa_erase(xa, index); + spin_unlock_bh(&(xa)->xa_lock); + + return entry; +} +# 656 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_erase_irq(struct xarray *xa, unsigned long index) +{ + void *entry; + + spin_lock_irq(&(xa)->xa_lock); + entry = __xa_erase(xa, index); + spin_unlock_irq(&(xa)->xa_lock); + + return entry; +} +# 682 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_cmpxchg(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + spin_lock(&(xa)->xa_lock); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + spin_unlock(&(xa)->xa_lock); + + return curr; +} +# 709 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + spin_lock_bh(&(xa)->xa_lock); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + spin_unlock_bh(&(xa)->xa_lock); + + return curr; +} +# 736 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + spin_lock_irq(&(xa)->xa_lock); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + spin_unlock_irq(&(xa)->xa_lock); + + return curr; +} +# 765 "./include/linux/xarray.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int 
[This portion of the diff consists of several thousand lines of machine-generated, preprocessor-expanded Linux kernel headers (include/linux/xarray.h, include/linux/radix-tree.h, include/linux/local_lock.h, include/linux/refcount.h, include/linux/pid.h, include/linux/capability.h, include/linux/semaphore.h, include/linux/fcntl.h, include/linux/signal.h, include/linux/sched.h, and related files), apparently a preprocessed C translation unit added as an input for the analysis; the expanded code is omitted here.]
rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + +typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); +typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed); +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, + const void *obj); +# 56 "./include/linux/rhashtable-types.h" +struct rhashtable_params { + u16 nelem_hint; + u16 key_len; + u16 key_offset; + u16 head_offset; + unsigned int max_size; + u16 min_size; + bool automatic_shrinking; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; +}; +# 81 "./include/linux/rhashtable-types.h" +struct rhashtable { + struct bucket_table *tbl; + unsigned int key_len; + unsigned int max_elems; + struct rhashtable_params p; + bool rhlist; + struct work_struct run_work; + struct mutex mutex; + spinlock_t lock; + atomic_t nelems; +}; + + + + + +struct rhltable { + struct rhashtable ht; +}; + + + + + + +struct rhashtable_walker { + struct list_head list; + struct bucket_table *tbl; +}; +# 120 "./include/linux/rhashtable-types.h" +struct rhashtable_iter { + struct rhashtable *ht; + struct rhash_head *p; + struct rhlist_head *list; + struct rhashtable_walker walker; + unsigned int slot; + unsigned int skip; + bool end_of_table; +}; + +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params); +int rhltable_init(struct rhltable *hlt, + const struct rhashtable_params *params); +# 8 "./include/linux/ipc.h" 2 +# 1 "./include/uapi/linux/ipc.h" 1 +# 10 "./include/uapi/linux/ipc.h" +struct ipc_perm +{ + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; +}; + + +# 1 "./arch/x86/include/generated/uapi/asm/ipcbuf.h" 1 +# 1 "./include/uapi/asm-generic/ipcbuf.h" 1 +# 22 "./include/uapi/asm-generic/ipcbuf.h" +struct ipc64_perm { + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + + unsigned char __pad1[4 - sizeof(__kernel_mode_t)]; + unsigned short seq; + unsigned short __pad2; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; +}; +# 1 "./arch/x86/include/generated/uapi/asm/ipcbuf.h" 2 +# 23 "./include/uapi/linux/ipc.h" 2 +# 58 "./include/uapi/linux/ipc.h" +struct ipc_kludge { + struct msgbuf *msgp; + long msgtyp; +}; +# 9 "./include/linux/ipc.h" 2 + + + +struct kern_ipc_perm { + spinlock_t lock; + bool deleted; + int id; + key_t key; + kuid_t uid; + kgid_t gid; + kuid_t cuid; + kgid_t cgid; + umode_t mode; + unsigned long seq; + void *security; + + struct rhash_head khtnode; + + struct callback_head rcu; + refcount_t refcount; +} __attribute__((__aligned__((1 << (6))))) __attribute__((__designated_init__)); +# 6 "./include/uapi/linux/sem.h" 2 +# 25 "./include/uapi/linux/sem.h" +struct semid_ds { + struct ipc_perm sem_perm; + __kernel_old_time_t sem_otime; + __kernel_old_time_t sem_ctime; + struct sem *sem_base; + struct sem_queue *sem_pending; + struct sem_queue **sem_pending_last; + struct sem_undo *undo; + unsigned short sem_nsems; +}; + + +# 1 "./arch/x86/include/uapi/asm/sembuf.h" 1 + + + + +# 1 "./arch/x86/include/generated/uapi/asm/ipcbuf.h" 1 +# 6 "./arch/x86/include/uapi/asm/sembuf.h" 2 +# 18 "./arch/x86/include/uapi/asm/sembuf.h" +struct semid64_ds { + struct ipc64_perm sem_perm; + + + + + + + __kernel_long_t sem_otime; + __kernel_ulong_t __unused1; + __kernel_long_t sem_ctime; + __kernel_ulong_t __unused2; + + __kernel_ulong_t 
sem_nsems; + __kernel_ulong_t __unused3; + __kernel_ulong_t __unused4; +}; +# 38 "./include/uapi/linux/sem.h" 2 + + +struct sembuf { + unsigned short sem_num; + short sem_op; + short sem_flg; +}; + + +union semun { + int val; + struct semid_ds *buf; + unsigned short *array; + struct seminfo *__buf; + void *__pad; +}; + +struct seminfo { + int semmap; + int semmni; + int semmns; + int semmnu; + int semmsl; + int semopm; + int semume; + int semusz; + int semvmx; + int semaem; +}; +# 6 "./include/linux/sem.h" 2 + +struct task_struct; +struct sem_undo_list; + + + +struct sysv_sem { + struct sem_undo_list *undo_list; +}; + +extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); +extern void exit_sem(struct task_struct *tsk); +# 16 "./include/linux/sched.h" 2 +# 1 "./include/linux/shm.h" 1 + + + + + + +# 1 "./include/uapi/linux/shm.h" 1 + + + + + + +# 1 "./include/uapi/asm-generic/hugetlb_encode.h" 1 +# 8 "./include/uapi/linux/shm.h" 2 +# 28 "./include/uapi/linux/shm.h" +struct shmid_ds { + struct ipc_perm shm_perm; + int shm_segsz; + __kernel_old_time_t shm_atime; + __kernel_old_time_t shm_dtime; + __kernel_old_time_t shm_ctime; + __kernel_ipc_pid_t shm_cpid; + __kernel_ipc_pid_t shm_lpid; + unsigned short shm_nattch; + unsigned short shm_unused; + void *shm_unused2; + void *shm_unused3; +}; + + +# 1 "./arch/x86/include/uapi/asm/shmbuf.h" 1 + + + + + +# 1 "./include/uapi/asm-generic/shmbuf.h" 1 +# 25 "./include/uapi/asm-generic/shmbuf.h" +struct shmid64_ds { + struct ipc64_perm shm_perm; + size_t shm_segsz; + + long shm_atime; + long shm_dtime; + long shm_ctime; +# 40 "./include/uapi/asm-generic/shmbuf.h" + __kernel_pid_t shm_cpid; + __kernel_pid_t shm_lpid; + unsigned long shm_nattch; + unsigned long __unused4; + unsigned long __unused5; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; +# 7 "./arch/x86/include/uapi/asm/shmbuf.h" 2 +# 44 "./include/uapi/linux/shm.h" 2 +# 93 "./include/uapi/linux/shm.h" +struct shminfo { + int shmmax; + int shmmin; + int shmmni; + int shmseg; + int shmall; +}; + +struct shm_info { + int used_ids; + __kernel_ulong_t shm_tot; + __kernel_ulong_t shm_rss; + __kernel_ulong_t shm_swp; + __kernel_ulong_t swap_attempts; + __kernel_ulong_t swap_successes; +}; +# 8 "./include/linux/shm.h" 2 +# 1 "./arch/x86/include/asm/shmparam.h" 1 +# 9 "./include/linux/shm.h" 2 + +struct file; + + +struct sysv_shm { + struct list_head shm_clist; +}; + +long do_shmat(int shmid, char *shmaddr, int shmflg, unsigned long *addr, + unsigned long shmlba); +bool is_file_shm_hugepages(struct file *file); +void exit_shm(struct task_struct *task); +# 17 "./include/linux/sched.h" 2 +# 1 "./include/linux/kcov.h" 1 + + + + +# 1 "./include/uapi/linux/kcov.h" 1 +# 11 "./include/uapi/linux/kcov.h" +struct kcov_remote_arg { + __u32 trace_mode; + __u32 area_size; + __u32 num_handles; + __u64 __attribute__((aligned(8))) common_handle; + __u64 __attribute__((aligned(8))) handles[0]; +}; +# 26 "./include/uapi/linux/kcov.h" +enum { +# 35 "./include/uapi/linux/kcov.h" + KCOV_TRACE_PC = 0, + + KCOV_TRACE_CMP = 1, +}; +# 56 "./include/uapi/linux/kcov.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u64 kcov_remote_handle(__u64 subsys, __u64 inst) +{ + if (subsys & ~(0xffull << 56) || inst & ~(0xffffffffull)) + 
return 0; + return subsys | inst; +} +# 6 "./include/linux/kcov.h" 2 + +struct task_struct; + + + +enum kcov_mode { + + KCOV_MODE_DISABLED = 0, + + KCOV_MODE_INIT = 1, + + + + + KCOV_MODE_TRACE_PC = 2, + + KCOV_MODE_TRACE_CMP = 3, +}; + + + +void kcov_task_init(struct task_struct *t); +void kcov_task_exit(struct task_struct *t); +# 41 "./include/linux/kcov.h" +void kcov_remote_start(u64 handle); +void kcov_remote_stop(void); +u64 kcov_common_handle(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kcov_remote_start_common(u64 id) +{ + kcov_remote_start(kcov_remote_handle((0x00ull << 56), id)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kcov_remote_start_usb(u64 id) +{ + kcov_remote_start(kcov_remote_handle((0x01ull << 56), id)); +} +# 18 "./include/linux/sched.h" 2 + +# 1 "./include/linux/plist.h" 1 +# 79 "./include/linux/plist.h" +struct plist_head { + struct list_head node_list; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; +# 121 "./include/linux/plist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +plist_head_init(struct plist_head *head) +{ + INIT_LIST_HEAD(&head->node_list); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void plist_node_init(struct plist_node *node, int prio) +{ + node->prio = prio; + INIT_LIST_HEAD(&node->prio_list); + INIT_LIST_HEAD(&node->node_list); +} + +extern void plist_add(struct plist_node *node, struct plist_head *head); +extern void plist_del(struct plist_node *node, struct plist_head *head); + +extern void plist_requeue(struct plist_node *node, struct plist_head *head); +# 210 "./include/linux/plist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int plist_head_empty(const struct plist_head *head) +{ + return list_empty(&head->node_list); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int plist_node_empty(const struct plist_node *node) +{ + return list_empty(&node->node_list); +} +# 280 "./include/linux/plist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct plist_node *plist_first(const struct plist_head *head) +{ + return ({ void *__mptr = (void *)(head->node_list.next); do { extern void __compiletime_assert_833(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(head->node_list.next)), typeof(((struct plist_node *)0)->node_list)) && !__builtin_types_compatible_p(typeof(*(head->node_list.next)), typeof(void))))) __compiletime_assert_833(); } while (0); ((struct plist_node *)(__mptr - __builtin_offsetof(struct plist_node, node_list))); }) + ; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct plist_node *plist_last(const struct plist_head *head) +{ + return ({ void *__mptr = (void *)(head->node_list.prev); do { extern void __compiletime_assert_834(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(head->node_list.prev)), 
typeof(((struct plist_node *)0)->node_list)) && !__builtin_types_compatible_p(typeof(*(head->node_list.prev)), typeof(void))))) __compiletime_assert_834(); } while (0); ((struct plist_node *)(__mptr - __builtin_offsetof(struct plist_node, node_list))); }) + ; +} +# 20 "./include/linux/sched.h" 2 +# 1 "./include/linux/hrtimer.h" 1 +# 15 "./include/linux/hrtimer.h" +# 1 "./include/linux/hrtimer_defs.h" 1 +# 16 "./include/linux/hrtimer.h" 2 + + + + + +# 1 "./include/linux/timerqueue.h" 1 +# 9 "./include/linux/timerqueue.h" +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + + +extern bool timerqueue_add(struct timerqueue_head *head, + struct timerqueue_node *node); +extern bool timerqueue_del(struct timerqueue_head *head, + struct timerqueue_node *node); +extern struct timerqueue_node *timerqueue_iterate_next( + struct timerqueue_node *node); +# 33 "./include/linux/timerqueue.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) +{ + struct rb_node *leftmost = (&head->rb_root)->rb_leftmost; + + return ({ void *__mptr = (void *)(leftmost); do { extern void __compiletime_assert_835(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(leftmost)), typeof(((struct timerqueue_node *)0)->node)) && !__builtin_types_compatible_p(typeof(*(leftmost)), typeof(void))))) __compiletime_assert_835(); } while (0); ((struct timerqueue_node *)(__mptr - __builtin_offsetof(struct timerqueue_node, node))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void timerqueue_init(struct timerqueue_node *node) +{ + ((&node->node)->__rb_parent_color = (unsigned long)(&node->node)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timerqueue_node_queued(struct timerqueue_node *node) +{ + return !((&node->node)->__rb_parent_color == (unsigned long)(&node->node)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool timerqueue_node_expires(struct timerqueue_node *node) +{ + return node->expires; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void timerqueue_init_head(struct timerqueue_head *head) +{ + head->rb_root = (struct rb_root_cached) { {((void *)0), }, ((void *)0) }; +} +# 22 "./include/linux/hrtimer.h" 2 + +struct hrtimer_clock_base; +struct hrtimer_cpu_base; +# 38 "./include/linux/hrtimer.h" +enum hrtimer_mode { + HRTIMER_MODE_ABS = 0x00, + HRTIMER_MODE_REL = 0x01, + HRTIMER_MODE_PINNED = 0x02, + HRTIMER_MODE_SOFT = 0x04, + HRTIMER_MODE_HARD = 0x08, + + HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, + HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, + + HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT, + HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT, + + HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, + HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, + + HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD, + HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD, + + HRTIMER_MODE_ABS_PINNED_HARD = 
HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD, + HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD, +}; + + + + +enum hrtimer_restart { + HRTIMER_NORESTART, + HRTIMER_RESTART, +}; +# 117 "./include/linux/hrtimer.h" +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; +}; +# 135 "./include/linux/hrtimer.h" +struct hrtimer_sleeper { + struct hrtimer timer; + struct task_struct *task; +}; +# 158 "./include/linux/hrtimer.h" +struct hrtimer_clock_base { + struct hrtimer_cpu_base *cpu_base; + unsigned int index; + clockid_t clockid; + seqcount_t seq; + struct hrtimer *running; + struct timerqueue_head active; + ktime_t (*get_time)(void); + ktime_t offset; +} __attribute__((__aligned__((1 << (6))))); + +enum hrtimer_base_type { + HRTIMER_BASE_MONOTONIC, + HRTIMER_BASE_REALTIME, + HRTIMER_BASE_BOOTTIME, + HRTIMER_BASE_TAI, + HRTIMER_BASE_MONOTONIC_SOFT, + HRTIMER_BASE_REALTIME_SOFT, + HRTIMER_BASE_BOOTTIME_SOFT, + HRTIMER_BASE_TAI_SOFT, + HRTIMER_MAX_CLOCK_BASES, +}; +# 213 "./include/linux/hrtimer.h" +struct hrtimer_cpu_base { + raw_spinlock_t lock; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; + unsigned int hres_active : 1, + in_hrtirq : 1, + hang_detected : 1, + softirq_activated : 1; + + unsigned int nr_events; + unsigned short nr_retries; + unsigned short nr_hangs; + unsigned int max_hang_time; + + + + + + ktime_t expires_next; + struct hrtimer *next_timer; + ktime_t softirq_expires_next; + struct hrtimer *softirq_next_timer; + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; +} __attribute__((__aligned__((1 << (6))))); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) +{ + timer->node.expires = time; + timer->_softexpires = time; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta) +{ + timer->_softexpires = time; + timer->node.expires = ktime_add_safe(time, delta); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta) +{ + timer->_softexpires = time; + timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) +{ + timer->node.expires = tv64; + timer->_softexpires = tv64; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) +{ + timer->node.expires = ktime_add_safe(timer->node.expires, time); + timer->_softexpires = ktime_add_safe(timer->_softexpires, time); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns) +{ + timer->node.expires = ((timer->node.expires) + (ns)); + timer->_softexpires = ((timer->_softexpires) + (ns)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) ktime_t hrtimer_get_expires(const struct hrtimer *timer) +{ + return timer->node.expires; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) +{ + return timer->_softexpires; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) +{ + return timer->node.expires; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) +{ + return timer->_softexpires; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer) +{ + return ktime_to_ns(timer->node.expires); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) +{ + return ((timer->node.expires) - (timer->base->get_time())); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer) +{ + return timer->base->get_time(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hrtimer_is_hres_active(struct hrtimer *timer) +{ + return 1 ? + timer->base->cpu_base->hres_active : 0; +} + + +struct clock_event_device; + +extern void hrtimer_interrupt(struct clock_event_device *dev); + +extern void clock_was_set_delayed(void); + +extern unsigned int hrtimer_resolution; +# 332 "./include/linux/hrtimer.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t +__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now) +{ + ktime_t rem = ((timer->node.expires) - (now)); + + + + + + if (0 && timer->is_rel) + rem -= hrtimer_resolution; + return rem; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t +hrtimer_expires_remaining_adjusted(const struct hrtimer *timer) +{ + return __hrtimer_expires_remaining_adjusted(timer, + timer->base->get_time()); +} + +extern void clock_was_set(void); + +extern void timerfd_clock_was_set(void); + + + +extern void hrtimers_resume(void); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_tick_cpu_device; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tick_device) tick_cpu_device; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_cancel_wait_running(struct hrtimer *timer) +{ + cpu_relax(); +} + + + + + +extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, + enum hrtimer_mode mode); +extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, + enum hrtimer_mode mode); + + +extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, + enum hrtimer_mode mode); +extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode); + +extern void destroy_hrtimer_on_stack(struct hrtimer *timer); +# 407 
"./include/linux/hrtimer.h" +extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + u64 range_ns, const enum hrtimer_mode mode); +# 418 "./include/linux/hrtimer.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_start(struct hrtimer *timer, ktime_t tim, + const enum hrtimer_mode mode) +{ + hrtimer_start_range_ns(timer, tim, 0, mode); +} + +extern int hrtimer_cancel(struct hrtimer *timer); +extern int hrtimer_try_to_cancel(struct hrtimer *timer); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_start_expires(struct hrtimer *timer, + enum hrtimer_mode mode) +{ + u64 delta; + ktime_t soft, hard; + soft = hrtimer_get_softexpires(timer); + hard = hrtimer_get_expires(timer); + delta = ktime_to_ns(((hard) - (soft))); + hrtimer_start_range_ns(timer, soft, delta, mode); +} + +void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl, + enum hrtimer_mode mode); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hrtimer_restart(struct hrtimer *timer) +{ + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); +} + + +extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t hrtimer_get_remaining(const struct hrtimer *timer) +{ + return __hrtimer_get_remaining(timer, false); +} + +extern u64 hrtimer_get_next_event(void); +extern u64 hrtimer_next_event_without(const struct hrtimer *exclude); + +extern bool hrtimer_active(const struct hrtimer *timer); +# 467 "./include/linux/hrtimer.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hrtimer_is_queued(struct hrtimer *timer) +{ + + return !!(({ do { extern void __compiletime_assert_836(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(timer->state) == sizeof(char) || sizeof(timer->state) == sizeof(short) || sizeof(timer->state) == sizeof(int) || sizeof(timer->state) == sizeof(long)) || sizeof(timer->state) == sizeof(long long))) __compiletime_assert_836(); } while (0); ({ typeof( _Generic((timer->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (timer->state))) __x = (*(const volatile typeof( _Generic((timer->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (timer->state))) *)&(timer->state)); do { } while (0); (typeof(timer->state))__x; }); }) & 0x01); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hrtimer_callback_running(struct hrtimer *timer) +{ + return timer->base->running == timer; +} + + +extern u64 +hrtimer_forward(struct hrtimer *timer, 
ktime_t now, ktime_t interval); +# 502 "./include/linux/hrtimer.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 hrtimer_forward_now(struct hrtimer *timer, + ktime_t interval) +{ + return hrtimer_forward(timer, timer->base->get_time(), interval); +} + + + +extern int nanosleep_copyout(struct restart_block *, struct timespec64 *); +extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, + const clockid_t clockid); + +extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, + const enum hrtimer_mode mode); +extern int schedule_hrtimeout_range_clock(ktime_t *expires, + u64 delta, + const enum hrtimer_mode mode, + clockid_t clock_id); +extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); + + +extern void hrtimer_run_queues(void); + + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) hrtimers_init(void); + + +extern void sysrq_timer_list_show(void); + +int hrtimers_prepare_cpu(unsigned int cpu); + +int hrtimers_dead_cpu(unsigned int cpu); +# 21 "./include/linux/sched.h" 2 +# 1 "./include/linux/seccomp.h" 1 + + + + +# 1 "./include/uapi/linux/seccomp.h" 1 +# 60 "./include/uapi/linux/seccomp.h" +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; +# 109 "./include/uapi/linux/seccomp.h" +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; +# 6 "./include/linux/seccomp.h" 2 +# 16 "./include/linux/seccomp.h" +# 1 "./arch/x86/include/asm/seccomp.h" 1 + + + + +# 1 "./arch/x86/include/asm/unistd.h" 1 + + + + +# 1 "./arch/x86/include/uapi/asm/unistd.h" 1 +# 6 "./arch/x86/include/asm/unistd.h" 2 +# 20 "./arch/x86/include/asm/unistd.h" +# 1 "./arch/x86/include/generated/uapi/asm/unistd_64.h" 1 +# 21 "./arch/x86/include/asm/unistd.h" 2 +# 1 "./arch/x86/include/generated/asm/unistd_64_x32.h" 1 +# 22 "./arch/x86/include/asm/unistd.h" 2 +# 1 "./arch/x86/include/generated/asm/unistd_32_ia32.h" 1 +# 23 "./arch/x86/include/asm/unistd.h" 2 +# 6 "./arch/x86/include/asm/seccomp.h" 2 + + + + + + +# 1 "./arch/x86/include/asm/ia32_unistd.h" 1 +# 13 "./arch/x86/include/asm/seccomp.h" 2 + + + + + + +# 1 "./include/asm-generic/seccomp.h" 1 +# 11 "./include/asm-generic/seccomp.h" +# 1 "./include/uapi/linux/unistd.h" 1 +# 12 "./include/asm-generic/seccomp.h" 2 +# 31 "./include/asm-generic/seccomp.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const int *get_compat_mode1_syscalls(void) +{ + static const int mode1_syscalls_32[] = { + 3, 4, + 1, 119, + 0, + }; + return mode1_syscalls_32; +} +# 20 "./arch/x86/include/asm/seccomp.h" 2 +# 17 "./include/linux/seccomp.h" 2 + +struct seccomp_filter; +# 30 "./include/linux/seccomp.h" +struct seccomp { + int mode; + struct seccomp_filter *filter; +}; + + +extern int __secure_computing(const struct seccomp_data *sd); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int secure_computing(void) +{ + if (__builtin_expect(!!(test_ti_thread_flag(((struct thread_info *)get_current()), 8)), 0)) + return __secure_computing(((void *)0)); + return 0; +} + + + + +extern long 
prctl_get_seccomp(void); +extern long prctl_set_seccomp(unsigned long, void *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int seccomp_mode(struct seccomp *s) +{ + return s->mode; +} +# 85 "./include/linux/seccomp.h" +extern void put_seccomp_filter(struct task_struct *tsk); +extern void get_seccomp_filter(struct task_struct *tsk); +# 99 "./include/linux/seccomp.h" +extern long seccomp_get_filter(struct task_struct *task, + unsigned long filter_off, void *data); +extern long seccomp_get_metadata(struct task_struct *task, + unsigned long filter_off, void *data); +# 22 "./include/linux/sched.h" 2 + + + +# 1 "./include/linux/resource.h" 1 + + + + +# 1 "./include/uapi/linux/resource.h" 1 +# 24 "./include/uapi/linux/resource.h" +struct rusage { + struct __kernel_old_timeval ru_utime; + struct __kernel_old_timeval ru_stime; + __kernel_long_t ru_maxrss; + __kernel_long_t ru_ixrss; + __kernel_long_t ru_idrss; + __kernel_long_t ru_isrss; + __kernel_long_t ru_minflt; + __kernel_long_t ru_majflt; + __kernel_long_t ru_nswap; + __kernel_long_t ru_inblock; + __kernel_long_t ru_oublock; + __kernel_long_t ru_msgsnd; + __kernel_long_t ru_msgrcv; + __kernel_long_t ru_nsignals; + __kernel_long_t ru_nvcsw; + __kernel_long_t ru_nivcsw; +}; + +struct rlimit { + __kernel_ulong_t rlim_cur; + __kernel_ulong_t rlim_max; +}; + + + +struct rlimit64 { + __u64 rlim_cur; + __u64 rlim_max; +}; +# 78 "./include/uapi/linux/resource.h" +# 1 "./arch/x86/include/generated/uapi/asm/resource.h" 1 +# 1 "./include/asm-generic/resource.h" 1 + + + + +# 1 "./include/uapi/asm-generic/resource.h" 1 +# 6 "./include/asm-generic/resource.h" 2 +# 1 "./arch/x86/include/generated/uapi/asm/resource.h" 2 +# 79 "./include/uapi/linux/resource.h" 2 +# 6 "./include/linux/resource.h" 2 + + +struct task_struct; + +void getrusage(struct task_struct *p, int who, struct rusage *ru); +int do_prlimit(struct task_struct *tsk, unsigned int resource, + struct rlimit *new_rlim, struct rlimit *old_rlim); +# 26 "./include/linux/sched.h" 2 +# 1 "./include/linux/latencytop.h" 1 +# 14 "./include/linux/latencytop.h" +struct task_struct; + + + + + + +struct latency_record { + unsigned long backtrace[12]; + unsigned int count; + unsigned long time; + unsigned long max; +}; + + + +extern int latencytop_enabled; +void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +account_scheduler_latency(struct task_struct *task, int usecs, int inter) +{ + if (__builtin_expect(!!(latencytop_enabled), 0)) + __account_scheduler_latency(task, usecs, inter); +} + +void clear_tsk_latency_tracing(struct task_struct *p); + +int sysctl_latencytop(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +# 27 "./include/linux/sched.h" 2 +# 1 "./include/linux/sched/prio.h" 1 +# 48 "./include/linux/sched/prio.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long nice_to_rlimit(long nice) +{ + return (19 - nice + 1); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long rlimit_to_nice(long prio) +{ + return (19 - prio + 1); +} +# 28 "./include/linux/sched.h" 2 +# 1 "./include/linux/sched/types.h" 1 +# 17 "./include/linux/sched/types.h" +struct task_cputime { + u64 stime; + u64 utime; + unsigned long long 
sum_exec_runtime; +}; +# 29 "./include/linux/sched.h" 2 + + +# 1 "./include/linux/task_io_accounting.h" 1 +# 12 "./include/linux/task_io_accounting.h" +struct task_io_accounting { + + + u64 rchar; + + u64 wchar; + + u64 syscr; + + u64 syscw; + + + + + + + + u64 read_bytes; + + + + + + u64 write_bytes; +# 44 "./include/linux/task_io_accounting.h" + u64 cancelled_write_bytes; + +}; +# 32 "./include/linux/sched.h" 2 +# 1 "./include/linux/posix-timers.h" 1 + + + + + + +# 1 "./include/linux/alarmtimer.h" 1 +# 9 "./include/linux/alarmtimer.h" +struct rtc_device; + +enum alarmtimer_type { + ALARM_REALTIME, + ALARM_BOOTTIME, + + + ALARM_NUMTYPE, + + + ALARM_REALTIME_FREEZER, + ALARM_BOOTTIME_FREEZER, +}; + +enum alarmtimer_restart { + ALARMTIMER_NORESTART, + ALARMTIMER_RESTART, +}; +# 42 "./include/linux/alarmtimer.h" +struct alarm { + struct timerqueue_node node; + struct hrtimer timer; + enum alarmtimer_restart (*function)(struct alarm *, ktime_t now); + enum alarmtimer_type type; + int state; + void *data; +}; + +void alarm_init(struct alarm *alarm, enum alarmtimer_type type, + enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); +void alarm_start(struct alarm *alarm, ktime_t start); +void alarm_start_relative(struct alarm *alarm, ktime_t start); +void alarm_restart(struct alarm *alarm); +int alarm_try_to_cancel(struct alarm *alarm); +int alarm_cancel(struct alarm *alarm); + +u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); +u64 alarm_forward_now(struct alarm *alarm, ktime_t interval); +ktime_t alarm_expires_remaining(const struct alarm *alarm); + + + +struct rtc_device *alarmtimer_get_rtcdev(void); +# 8 "./include/linux/posix-timers.h" 2 + + +struct kernel_siginfo; +struct task_struct; +# 38 "./include/linux/posix-timers.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) clockid_t make_process_cpuclock(const unsigned int pid, + const clockid_t clock) +{ + return ((~pid) << 3) | clock; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) clockid_t make_thread_cpuclock(const unsigned int tid, + const clockid_t clock) +{ + return make_process_cpuclock(tid, clock | 4); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) clockid_t fd_to_clockid(const int fd) +{ + return make_process_cpuclock((unsigned int) fd, 3); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int clockid_to_fd(const clockid_t clk) +{ + return ~(clk >> 3); +} +# 69 "./include/linux/posix-timers.h" +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + int firing; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cpu_timer_enqueue(struct timerqueue_head *head, + struct cpu_timer *ctmr) +{ + ctmr->head = head; + return timerqueue_add(head, &ctmr->node); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_timer_dequeue(struct cpu_timer *ctmr) +{ + if (ctmr->head) { + timerqueue_del(ctmr->head, &ctmr->node); + ctmr->head = ((void *)0); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 cpu_timer_getexpires(struct cpu_timer *ctmr) +{ + 
return ctmr->node.expires; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp) +{ + ctmr->node.expires = exp; +} + + + + + + +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; +# 122 "./include/linux/posix-timers.h" +struct posix_cputimers { + struct posix_cputimer_base bases[3]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void posix_cputimers_init(struct posix_cputimers *pct) +{ + memset(pct, 0, sizeof(*pct)); + pct->bases[0].nextevt = ((u64)~0ULL); + pct->bases[1].nextevt = ((u64)~0ULL); + pct->bases[2].nextevt = ((u64)~0ULL); +} + +void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void posix_cputimers_rt_watchdog(struct posix_cputimers *pct, + u64 runtime) +{ + pct->bases[2].nextevt = runtime; +} +# 193 "./include/linux/posix-timers.h" +struct k_itimer { + struct list_head list; + struct hlist_node t_hash; + spinlock_t it_lock; + const struct k_clock *kclock; + clockid_t it_clock; + timer_t it_id; + int it_active; + s64 it_overrun; + s64 it_overrun_last; + int it_requeue_pending; + int it_sigev_notify; + ktime_t it_interval; + struct signal_struct *it_signal; + union { + struct pid *it_pid; + struct task_struct *it_process; + }; + struct sigqueue *sigq; + union { + struct { + struct hrtimer timer; + } real; + struct cpu_timer cpu; + struct { + struct alarm alarmtimer; + } alarm; + } it; + struct callback_head rcu; +}; + +void run_posix_cpu_timers(void); +void posix_cpu_timers_exit(struct task_struct *task); +void posix_cpu_timers_exit_group(struct task_struct *task); +void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, + u64 *newval, u64 *oldval); + +void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); + +void posixtimer_rearm(struct kernel_siginfo *info); +# 33 "./include/linux/sched.h" 2 +# 1 "./include/uapi/linux/rseq.h" 1 +# 16 "./include/uapi/linux/rseq.h" +enum rseq_cpu_id_state { + RSEQ_CPU_ID_UNINITIALIZED = -1, + RSEQ_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_flags { + RSEQ_FLAG_UNREGISTER = (1 << 0), +}; + +enum rseq_cs_flags_bit { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum rseq_cs_flags { + RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = + (1U << RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT), + RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = + (1U << RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT), + RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = + (1U << RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT), +}; + + + + + + +struct rseq_cs { + + __u32 version; + + __u32 flags; + __u64 start_ip; + + __u64 post_commit_offset; + __u64 abort_ip; +} __attribute__((aligned(4 * sizeof(__u64)))); + + + + + + + +struct rseq { +# 75 "./include/uapi/linux/rseq.h" + __u32 cpu_id_start; +# 90 "./include/uapi/linux/rseq.h" + __u32 cpu_id; +# 109 "./include/uapi/linux/rseq.h" + union { + __u64 ptr64; + + __u64 ptr; +# 124 "./include/uapi/linux/rseq.h" + } rseq_cs; +# 144 "./include/uapi/linux/rseq.h" + __u32 flags; +} __attribute__((aligned(4 * sizeof(__u64)))); +# 34 "./include/linux/sched.h" 2 +# 1 "./include/linux/kcsan.h" 1 +# 55 "./include/linux/kcsan.h" 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kcsan_init(void) { } +# 35 "./include/linux/sched.h" 2 + + +struct audit_context; +struct backing_dev_info; +struct bio_list; +struct blk_plug; +struct capture_control; +struct cfs_rq; +struct fs_struct; +struct futex_pi_state; +struct io_context; +struct mempolicy; +struct nameidata; +struct nsproxy; +struct perf_event_context; +struct pid_namespace; +struct pipe_inode_info; +struct rcu_node; +struct reclaim_state; +struct robust_list_head; +struct root_domain; +struct rq; +struct sched_attr; +struct sched_param; +struct seq_file; +struct sighand_struct; +struct signal_struct; +struct task_delay_info; +struct task_group; +# 216 "./include/linux/sched.h" +extern void scheduler_tick(void); + + + +extern long schedule_timeout(long timeout); +extern long schedule_timeout_interruptible(long timeout); +extern long schedule_timeout_killable(long timeout); +extern long schedule_timeout_uninterruptible(long timeout); +extern long schedule_timeout_idle(long timeout); + void schedule(void); +extern void schedule_preempt_disabled(void); + void preempt_schedule_irq(void); + +extern int __attribute__((__warn_unused_result__)) io_schedule_prepare(void); +extern void io_schedule_finish(int token); +extern long io_schedule_timeout(long timeout); +extern void io_schedule(void); +# 243 "./include/linux/sched.h" +struct prev_cputime { + + u64 utime; + u64 stime; + raw_spinlock_t lock; + +}; + +enum vtime_state { + + VTIME_INACTIVE = 0, + + VTIME_IDLE, + + VTIME_SYS, + + VTIME_USER, + + VTIME_GUEST, +}; + +struct vtime { + seqcount_t seqcount; + unsigned long long starttime; + enum vtime_state state; + unsigned int cpu; + u64 utime; + u64 stime; + u64 gtime; +}; + + + + + + + +enum uclamp_id { + UCLAMP_MIN = 0, + UCLAMP_MAX, + UCLAMP_CNT +}; + + +extern struct root_domain def_root_domain; +extern struct mutex sched_domains_mutex; + + +struct sched_info { + + + + + unsigned long pcount; + + + unsigned long long run_delay; + + + + + unsigned long long last_arrival; + + + unsigned long long last_queued; + + +}; +# 326 "./include/linux/sched.h" +struct load_weight { + unsigned long weight; + u32 inv_weight; +}; +# 353 "./include/linux/sched.h" +struct util_est { + unsigned int enqueued; + unsigned int ewma; + +} __attribute__((__aligned__(sizeof(u64)))); +# 404 "./include/linux/sched.h" +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + unsigned long load_avg; + unsigned long runnable_avg; + unsigned long util_avg; + struct util_est util_est; +} __attribute__((__aligned__((1 << (6))))); + +struct sched_statistics { + + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + u64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; + +}; + +struct sched_entity { + + struct load_weight load; + struct rb_node run_node; + struct list_head group_node; + unsigned int on_rq; + + u64 exec_start; + u64 sum_exec_runtime; + u64 vruntime; + 
u64 prev_sum_exec_runtime; + + u64 nr_migrations; + + struct sched_statistics statistics; + + + int depth; + struct sched_entity *parent; + + struct cfs_rq *cfs_rq; + + struct cfs_rq *my_q; + + unsigned long runnable_weight; +# 486 "./include/linux/sched.h" + struct sched_avg avg; + +}; + +struct sched_rt_entity { + struct list_head run_list; + unsigned long timeout; + unsigned long watchdog_stamp; + unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; + + struct sched_rt_entity *back; + + struct sched_rt_entity *parent; + + struct rt_rq *rt_rq; + + struct rt_rq *my_q; + +} __attribute__((__designated_init__)); + +struct sched_dl_entity { + struct rb_node rb_node; + + + + + + + u64 dl_runtime; + u64 dl_deadline; + u64 dl_period; + u64 dl_bw; + u64 dl_density; + + + + + + + s64 runtime; + u64 deadline; + unsigned int flags; +# 555 "./include/linux/sched.h" + unsigned int dl_throttled : 1; + unsigned int dl_boosted : 1; + unsigned int dl_yielded : 1; + unsigned int dl_non_contending : 1; + unsigned int dl_overrun : 1; + + + + + + struct hrtimer dl_timer; +# 574 "./include/linux/sched.h" + struct hrtimer inactive_timer; +}; +# 604 "./include/linux/sched.h" +struct uclamp_se { + unsigned int value : ( __builtin_constant_p((1L << 10)) ? ( (((1L << 10)) == 0 || ((1L << 10)) == 1) ? 1 : ( __builtin_constant_p((1L << 10)) ? ( __builtin_constant_p((1L << 10)) ? ( ((1L << 10)) < 2 ? 0 : ((1L << 10)) & (1ULL << 63) ? 63 : ((1L << 10)) & (1ULL << 62) ? 62 : ((1L << 10)) & (1ULL << 61) ? 61 : ((1L << 10)) & (1ULL << 60) ? 60 : ((1L << 10)) & (1ULL << 59) ? 59 : ((1L << 10)) & (1ULL << 58) ? 58 : ((1L << 10)) & (1ULL << 57) ? 57 : ((1L << 10)) & (1ULL << 56) ? 56 : ((1L << 10)) & (1ULL << 55) ? 55 : ((1L << 10)) & (1ULL << 54) ? 54 : ((1L << 10)) & (1ULL << 53) ? 53 : ((1L << 10)) & (1ULL << 52) ? 52 : ((1L << 10)) & (1ULL << 51) ? 51 : ((1L << 10)) & (1ULL << 50) ? 50 : ((1L << 10)) & (1ULL << 49) ? 49 : ((1L << 10)) & (1ULL << 48) ? 48 : ((1L << 10)) & (1ULL << 47) ? 47 : ((1L << 10)) & (1ULL << 46) ? 46 : ((1L << 10)) & (1ULL << 45) ? 45 : ((1L << 10)) & (1ULL << 44) ? 44 : ((1L << 10)) & (1ULL << 43) ? 43 : ((1L << 10)) & (1ULL << 42) ? 42 : ((1L << 10)) & (1ULL << 41) ? 41 : ((1L << 10)) & (1ULL << 40) ? 40 : ((1L << 10)) & (1ULL << 39) ? 39 : ((1L << 10)) & (1ULL << 38) ? 38 : ((1L << 10)) & (1ULL << 37) ? 37 : ((1L << 10)) & (1ULL << 36) ? 36 : ((1L << 10)) & (1ULL << 35) ? 35 : ((1L << 10)) & (1ULL << 34) ? 34 : ((1L << 10)) & (1ULL << 33) ? 33 : ((1L << 10)) & (1ULL << 32) ? 32 : ((1L << 10)) & (1ULL << 31) ? 31 : ((1L << 10)) & (1ULL << 30) ? 30 : ((1L << 10)) & (1ULL << 29) ? 29 : ((1L << 10)) & (1ULL << 28) ? 28 : ((1L << 10)) & (1ULL << 27) ? 27 : ((1L << 10)) & (1ULL << 26) ? 26 : ((1L << 10)) & (1ULL << 25) ? 25 : ((1L << 10)) & (1ULL << 24) ? 24 : ((1L << 10)) & (1ULL << 23) ? 23 : ((1L << 10)) & (1ULL << 22) ? 22 : ((1L << 10)) & (1ULL << 21) ? 21 : ((1L << 10)) & (1ULL << 20) ? 20 : ((1L << 10)) & (1ULL << 19) ? 19 : ((1L << 10)) & (1ULL << 18) ? 18 : ((1L << 10)) & (1ULL << 17) ? 17 : ((1L << 10)) & (1ULL << 16) ? 16 : ((1L << 10)) & (1ULL << 15) ? 15 : ((1L << 10)) & (1ULL << 14) ? 14 : ((1L << 10)) & (1ULL << 13) ? 13 : ((1L << 10)) & (1ULL << 12) ? 12 : ((1L << 10)) & (1ULL << 11) ? 11 : ((1L << 10)) & (1ULL << 10) ? 10 : ((1L << 10)) & (1ULL << 9) ? 9 : ((1L << 10)) & (1ULL << 8) ? 8 : ((1L << 10)) & (1ULL << 7) ? 7 : ((1L << 10)) & (1ULL << 6) ? 6 : ((1L << 10)) & (1ULL << 5) ? 5 : ((1L << 10)) & (1ULL << 4) ? 4 : ((1L << 10)) & (1ULL << 3) ? 
3 : ((1L << 10)) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof((1L << 10)) <= 4) ? __ilog2_u32((1L << 10)) : __ilog2_u64((1L << 10)) ) + 1 ) : __bits_per((1L << 10)) ); + unsigned int bucket_id : ( __builtin_constant_p(5) ? ( ((5) == 0 || (5) == 1) ? 1 : ( __builtin_constant_p(5) ? ( __builtin_constant_p(5) ? ( (5) < 2 ? 0 : (5) & (1ULL << 63) ? 63 : (5) & (1ULL << 62) ? 62 : (5) & (1ULL << 61) ? 61 : (5) & (1ULL << 60) ? 60 : (5) & (1ULL << 59) ? 59 : (5) & (1ULL << 58) ? 58 : (5) & (1ULL << 57) ? 57 : (5) & (1ULL << 56) ? 56 : (5) & (1ULL << 55) ? 55 : (5) & (1ULL << 54) ? 54 : (5) & (1ULL << 53) ? 53 : (5) & (1ULL << 52) ? 52 : (5) & (1ULL << 51) ? 51 : (5) & (1ULL << 50) ? 50 : (5) & (1ULL << 49) ? 49 : (5) & (1ULL << 48) ? 48 : (5) & (1ULL << 47) ? 47 : (5) & (1ULL << 46) ? 46 : (5) & (1ULL << 45) ? 45 : (5) & (1ULL << 44) ? 44 : (5) & (1ULL << 43) ? 43 : (5) & (1ULL << 42) ? 42 : (5) & (1ULL << 41) ? 41 : (5) & (1ULL << 40) ? 40 : (5) & (1ULL << 39) ? 39 : (5) & (1ULL << 38) ? 38 : (5) & (1ULL << 37) ? 37 : (5) & (1ULL << 36) ? 36 : (5) & (1ULL << 35) ? 35 : (5) & (1ULL << 34) ? 34 : (5) & (1ULL << 33) ? 33 : (5) & (1ULL << 32) ? 32 : (5) & (1ULL << 31) ? 31 : (5) & (1ULL << 30) ? 30 : (5) & (1ULL << 29) ? 29 : (5) & (1ULL << 28) ? 28 : (5) & (1ULL << 27) ? 27 : (5) & (1ULL << 26) ? 26 : (5) & (1ULL << 25) ? 25 : (5) & (1ULL << 24) ? 24 : (5) & (1ULL << 23) ? 23 : (5) & (1ULL << 22) ? 22 : (5) & (1ULL << 21) ? 21 : (5) & (1ULL << 20) ? 20 : (5) & (1ULL << 19) ? 19 : (5) & (1ULL << 18) ? 18 : (5) & (1ULL << 17) ? 17 : (5) & (1ULL << 16) ? 16 : (5) & (1ULL << 15) ? 15 : (5) & (1ULL << 14) ? 14 : (5) & (1ULL << 13) ? 13 : (5) & (1ULL << 12) ? 12 : (5) & (1ULL << 11) ? 11 : (5) & (1ULL << 10) ? 10 : (5) & (1ULL << 9) ? 9 : (5) & (1ULL << 8) ? 8 : (5) & (1ULL << 7) ? 7 : (5) & (1ULL << 6) ? 6 : (5) & (1ULL << 5) ? 5 : (5) & (1ULL << 4) ? 4 : (5) & (1ULL << 3) ? 3 : (5) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(5) <= 4) ? 
__ilog2_u32(5) : __ilog2_u64(5) ) + 1 ) : __bits_per(5) ); + unsigned int active : 1; + unsigned int user_defined : 1; +}; + + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; + u8 need_mb; + } b; + u32 s; +}; + +enum perf_event_task_context { + perf_invalid_context = -1, + perf_hw_context = 0, + perf_sw_context, + perf_nr_task_contexts, +}; + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct task_struct { + + + + + + struct thread_info thread_info; + + + volatile long state; + + + + + + + + void *stack; + refcount_t usage; + + unsigned int flags; + unsigned int ptrace; + + + struct llist_node wake_entry; + unsigned int wake_entry_type; + int on_cpu; + + + unsigned int cpu; + + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; +# 675 "./include/linux/sched.h" + int recent_used_cpu; + int wake_cpu; + + int on_rq; + + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; + + struct task_group *sched_task_group; + + struct sched_dl_entity dl; + + + + struct uclamp_se uclamp_req[UCLAMP_CNT]; + + struct uclamp_se uclamp[UCLAMP_CNT]; + + + + + struct hlist_head preempt_notifiers; + + + + unsigned int btrace_seq; + + + unsigned int policy; + int nr_cpus_allowed; + const cpumask_t *cpus_ptr; + cpumask_t cpus_mask; +# 722 "./include/linux/sched.h" + unsigned long rcu_tasks_nvcsw; + u8 rcu_tasks_holdout; + u8 rcu_tasks_idx; + int rcu_tasks_idle_cpu; + struct list_head rcu_tasks_holdout_list; + + + + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + bool trc_reader_checked; + struct list_head trc_holdout_list; + + + struct sched_info sched_info; + + struct list_head tasks; + + struct plist_node pushable_tasks; + struct rb_node pushable_dl_tasks; + + + struct mm_struct *mm; + struct mm_struct *active_mm; + + + struct vmacache vmacache; + + + struct task_rss_stat rss_stat; + + int exit_state; + int exit_code; + int exit_signal; + + int pdeath_signal; + + unsigned long jobctl; + + + unsigned int personality; + + + unsigned sched_reset_on_fork:1; + unsigned sched_contributes_to_load:1; + unsigned sched_migrated:1; + unsigned sched_remote_wakeup:1; + + unsigned sched_psi_wake_requeue:1; + + + + unsigned :0; + + + + + unsigned in_execve:1; + unsigned in_iowait:1; + + unsigned restore_sigmask:1; + + + unsigned in_user_fault:1; + + + unsigned brk_randomized:1; + + + + unsigned no_cgroup_migration:1; + + unsigned frozen:1; + + + unsigned use_memdelay:1; + + + + unsigned in_memstall:1; + + + unsigned long atomic_flags; + + struct restart_block restart_block; + + pid_t pid; + pid_t tgid; + + + + unsigned long stack_canary; +# 823 "./include/linux/sched.h" + struct task_struct *real_parent; + + + struct task_struct *parent; + + + + + struct list_head children; + struct list_head sibling; + struct task_struct *group_leader; + + + + + + + + struct list_head ptraced; + struct list_head ptrace_entry; + + + struct pid *thread_pid; + struct hlist_node pid_links[PIDTYPE_MAX]; + struct list_head thread_group; + struct list_head thread_node; + + struct completion *vfork_done; + + + int *set_child_tid; + + + int *clear_child_tid; + + u64 utime; + u64 stime; + + + + + u64 gtime; + struct prev_cputime prev_cputime; +# 874 "./include/linux/sched.h" + unsigned long nvcsw; + unsigned long nivcsw; + + + u64 start_time; + + + u64 start_boottime; + + + unsigned long min_flt; + unsigned 
long maj_flt; + + + struct posix_cputimers posix_cputimers; + + + + + const struct cred *ptracer_cred; + + + const struct cred *real_cred; + + + const struct cred *cred; + + + + struct key *cached_requested_key; +# 913 "./include/linux/sched.h" + char comm[16]; + + struct nameidata *nameidata; + + + struct sysv_sem sysvsem; + struct sysv_shm sysvshm; + + + unsigned long last_switch_count; + unsigned long last_switch_time; + + + struct fs_struct *fs; + + + struct files_struct *files; + + + struct nsproxy *nsproxy; + + + struct signal_struct *signal; + struct sighand_struct *sighand; + sigset_t blocked; + sigset_t real_blocked; + + sigset_t saved_sigmask; + struct sigpending pending; + unsigned long sas_ss_sp; + size_t sas_ss_size; + unsigned int sas_ss_flags; + + struct callback_head *task_works; + + + + struct audit_context *audit_context; + + kuid_t loginuid; + unsigned int sessionid; + + struct seccomp seccomp; + + + u64 parent_exec_id; + u64 self_exec_id; + + + spinlock_t alloc_lock; + + + raw_spinlock_t pi_lock; + + struct wake_q_node wake_q; + + + + struct rb_root_cached pi_waiters; + + struct task_struct *pi_top_task; + + struct rt_mutex_waiter *pi_blocked_on; + + + + + struct mutex_waiter *blocked_on; + + + + int non_block_count; + + + + unsigned int irq_events; + unsigned int hardirq_threaded; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; + unsigned int hardirq_disable_event; + int hardirqs_enabled; + int hardirq_context; + u64 hardirq_chain_key; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; + int softirqs_enabled; + int softirq_context; + int irq_config; + + + + + u64 curr_chain_key; + int lockdep_depth; + unsigned int lockdep_recursion; + struct held_lock held_locks[48UL]; + + + + unsigned int in_ubsan; + + + + void *journal_info; + + + struct bio_list *bio_list; + + + + struct blk_plug *plug; + + + + struct reclaim_state *reclaim_state; + + struct backing_dev_info *backing_dev_info; + + struct io_context *io_context; + + + struct capture_control *capture_control; + + + unsigned long ptrace_message; + kernel_siginfo_t *last_siginfo; + + struct task_io_accounting ioac; + + + unsigned int psi_flags; + + + + u64 acct_rss_mem1; + + u64 acct_vm_mem1; + + u64 acct_timexpd; + + + + nodemask_t mems_allowed; + + seqcount_t mems_allowed_seq; + int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; + + + + struct css_set *cgroups; + + struct list_head cg_list; + + + u32 closid; + u32 rmid; + + + struct robust_list_head *robust_list; + + struct compat_robust_list_head *compat_robust_list; + + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; + struct mutex futex_exit_mutex; + unsigned int futex_state; + + + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; + struct mutex perf_event_mutex; + struct list_head perf_event_list; + + + + + + + struct mempolicy *mempolicy; + short il_prev; + short pref_node_fork; + + + int numa_scan_seq; + unsigned int numa_scan_period; + unsigned int numa_scan_period_max; + int numa_preferred_nid; + unsigned long numa_migrate_retry; + + u64 node_stamp; + u64 last_task_numa_placement; + u64 last_sum_exec_runtime; + struct callback_head numa_work; +# 1118 "./include/linux/sched.h" + struct numa_group *numa_group; +# 1134 "./include/linux/sched.h" + unsigned long *numa_faults; + unsigned long total_numa_faults; + + + + + + + + unsigned long 
numa_faults_locality[3]; + + unsigned long numa_pages_migrated; + + + + struct rseq *rseq; + u32 rseq_sig; + + + + + unsigned long rseq_event_mask; + + + struct tlbflush_unmap_batch tlb_ubc; + + union { + refcount_t rcu_users; + struct callback_head rcu; + }; + + + struct pipe_inode_info *splice_pipe; + + struct page_frag task_frag; + + + struct task_delay_info *delays; + + + + int make_it_fail; + unsigned int fail_nth; + + + + + + int nr_dirtied; + int nr_dirtied_pause; + + unsigned long dirty_paused_when; + + + int latency_record_count; + struct latency_record latency_record[32]; + + + + + + u64 timer_slack_ns; + u64 default_timer_slack_ns; + + + unsigned int kasan_depth; + + + + + + + + int curr_ret_stack; + int curr_ret_depth; + + + struct ftrace_ret_stack *ret_stack; + + + unsigned long long ftrace_timestamp; + + + + + + atomic_t trace_overrun; + + + atomic_t tracing_graph_pause; + + + + + unsigned long trace; + + + unsigned long trace_recursion; + + + + + + + unsigned int kcov_mode; + + + unsigned int kcov_size; + + + void *kcov_area; + + + struct kcov *kcov; + + + u64 kcov_handle; + + + int kcov_sequence; + + + unsigned int kcov_softirq; + + + + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; + + + unsigned int memcg_nr_pages_over_high; + + + struct mem_cgroup *active_memcg; + + + + struct request_queue *throttle_queue; + + + + struct uprobe_task *utask; + + + unsigned int sequential_io; + unsigned int sequential_io_avg; + + + unsigned long task_state_change; + + int pagefault_disabled; + + struct task_struct *oom_reaper_list; + + + struct vm_struct *stack_vm_area; + + + + refcount_t stack_refcount; + + + int patch_state; + + + + void *security; +# 1310 "./include/linux/sched.h" + u64 mce_addr; + __u64 mce_ripv : 1, + mce_whole_page : 1, + __mce_reserved : 62; + struct callback_head mce_kill_me; + + + + + + + + + + struct thread_struct thread; + + + + + + + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pid *task_pid(struct task_struct *task) +{ + return task->thread_pid; +} +# 1350 "./include/linux/sched.h" +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pid_nr(struct task_struct *tsk) +{ + return tsk->pid; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_tgid_nr(struct task_struct *tsk) +{ + return tsk->tgid; +} +# 1383 "./include/linux/sched.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pid_alive(const struct task_struct *p) +{ + return p->thread_pid != ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return 
__task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pgrp_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_session_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_tgid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ((void *)0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) +{ + pid_t pid = 0; + + rcu_read_lock(); + if (pid_alive(tsk)) + pid = task_tgid_nr_ns(({ typeof(*(tsk->real_parent)) *________p1 = (typeof(*(tsk->real_parent)) *)({ do { extern void __compiletime_assert_837(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((tsk->real_parent)) == sizeof(char) || sizeof((tsk->real_parent)) == sizeof(short) || sizeof((tsk->real_parent)) == sizeof(int) || sizeof((tsk->real_parent)) == sizeof(long)) || sizeof((tsk->real_parent)) == sizeof(long long))) __compiletime_assert_837(); } while (0); ({ typeof( _Generic(((tsk->real_parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tsk->real_parent)))) __x = (*(const volatile typeof( _Generic(((tsk->real_parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tsk->real_parent)))) *)&((tsk->real_parent))); do { } while (0); (typeof((tsk->real_parent)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/sched.h", 1425, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(tsk->real_parent)) *)(________p1)); }), ns); + rcu_read_unlock(); + + return pid; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_ppid_nr(const struct task_struct *tsk) +{ + return task_ppid_nr_ns(tsk, &init_pid_ns); +} + + 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pid_t task_pgrp_nr(struct task_struct *tsk) +{ + return task_pgrp_nr_ns(tsk, &init_pid_ns); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int task_state_index(struct task_struct *tsk) +{ + unsigned int tsk_state = ({ do { extern void __compiletime_assert_838(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(tsk->state) == sizeof(char) || sizeof(tsk->state) == sizeof(short) || sizeof(tsk->state) == sizeof(int) || sizeof(tsk->state) == sizeof(long)) || sizeof(tsk->state) == sizeof(long long))) __compiletime_assert_838(); } while (0); ({ typeof( _Generic((tsk->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (tsk->state))) __x = (*(const volatile typeof( _Generic((tsk->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (tsk->state))) *)&(tsk->state)); do { } while (0); (typeof(tsk->state))__x; }); }); + unsigned int state = (tsk_state | tsk->exit_state) & (0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040); + + do { extern void __compiletime_assert_839(void) __attribute__((__error__("BUILD_BUG_ON failed: " "((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) == 0 || ((((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) - 1)) != 0)"))); if (!(!(((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) == 0 || ((((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) - 1)) != 0)))) __compiletime_assert_839(); } while (0); + + if (tsk_state == (0x0002 | 0x0400)) + state = ((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1); + + return fls(state); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char task_index_to_char(unsigned int state) +{ + static const char state_char[] = "RSDTtXZPI"; + + do { extern void __compiletime_assert_840(void) __attribute__((__error__("BUILD_BUG_ON failed: " "1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1"))); if (!(!(1 + ( __builtin_constant_p((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) ? ( __builtin_constant_p((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) ? ( ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) < 2 ? 0 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 63) ? 
63 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 62) ? 62 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 61) ? 61 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 60) ? 60 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 59) ? 59 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 58) ? 58 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 57) ? 57 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 56) ? 56 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 55) ? 55 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 54) ? 54 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 53) ? 53 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 52) ? 52 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 51) ? 51 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 50) ? 50 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 49) ? 49 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 48) ? 48 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 47) ? 47 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 46) ? 46 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 45) ? 45 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 44) ? 44 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 43) ? 43 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 42) ? 42 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 41) ? 41 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 40) ? 40 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 39) ? 39 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 38) ? 38 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 37) ? 37 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 36) ? 36 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 35) ? 35 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 34) ? 34 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 33) ? 33 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 32) ? 32 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 31) ? 
31 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 30) ? 30 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 29) ? 29 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 28) ? 28 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 27) ? 27 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 26) ? 26 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 25) ? 25 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 24) ? 24 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 23) ? 23 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 22) ? 22 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 21) ? 21 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 20) ? 20 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 19) ? 19 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 18) ? 18 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 17) ? 17 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 16) ? 16 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 15) ? 15 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 14) ? 14 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 13) ? 13 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 12) ? 12 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 11) ? 11 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 10) ? 10 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 9) ? 9 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 8) ? 8 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 7) ? 7 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 6) ? 6 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 5) ? 5 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 4) ? 4 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 3) ? 3 : ((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) <= 4) ? 
__ilog2_u32((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) : __ilog2_u64((((0x0000 | 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 | 0x0020 | 0x0040) + 1) << 1)) ) != sizeof(state_char) - 1))) __compiletime_assert_840(); } while (0); + + return state_char[state]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char task_state_to_char(struct task_struct *tsk) +{ + return task_index_to_char(task_state_index(tsk)); +} +# 1481 "./include/linux/sched.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_global_init(struct task_struct *tsk) +{ + return task_tgid_nr(tsk) == 1; +} + +extern struct pid *cad_pid; +# 1550 "./include/linux/sched.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_percpu_thread(void) +{ + + return (get_current()->flags & 0x04000000) && + (get_current()->nr_cpus_allowed == 1); + + + +} +# 1582 "./include/linux/sched.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_no_new_privs(struct task_struct *p) { return test_bit(0, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_no_new_privs(struct task_struct *p) { set_bit(0, &p->atomic_flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spread_page(struct task_struct *p) { return test_bit(1, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spread_page(struct task_struct *p) { set_bit(1, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spread_page(struct task_struct *p) { clear_bit(1, &p->atomic_flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spread_slab(struct task_struct *p) { return test_bit(2, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spread_slab(struct task_struct *p) { set_bit(2, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spread_slab(struct task_struct *p) { clear_bit(2, &p->atomic_flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ssb_disable(struct task_struct *p) { return test_bit(3, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ssb_disable(struct task_struct *p) { set_bit(3, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spec_ssb_disable(struct task_struct *p) { clear_bit(3, &p->atomic_flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ssb_noexec(struct task_struct *p) { return test_bit(7, &p->atomic_flags); } +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ssb_noexec(struct task_struct *p) { set_bit(7, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spec_ssb_noexec(struct task_struct *p) { clear_bit(7, &p->atomic_flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ssb_force_disable(struct task_struct *p) { return test_bit(4, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ssb_force_disable(struct task_struct *p) { set_bit(4, &p->atomic_flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ib_disable(struct task_struct *p) { return test_bit(5, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ib_disable(struct task_struct *p) { set_bit(5, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_clear_spec_ib_disable(struct task_struct *p) { clear_bit(5, &p->atomic_flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_spec_ib_force_disable(struct task_struct *p) { return test_bit(6, &p->atomic_flags); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_set_spec_ib_force_disable(struct task_struct *p) { set_bit(6, &p->atomic_flags); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +current_restore_flags(unsigned long orig_flags, unsigned long flags) +{ + get_current()->flags &= ~flags; + get_current()->flags |= orig_flags & flags; +} + +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); + +extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); +extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); +# 1635 "./include/linux/sched.h" +extern int yield_to(struct task_struct *p, bool preempt); +extern void set_user_nice(struct task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int task_nice(const struct task_struct *p) +{ + return (((p)->static_prio) - (100 + (19 - -20 + 1) / 2)); +} + +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p); +extern int idle_cpu(int cpu); +extern int available_idle_cpu(int cpu); +extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); +extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern int sched_setattr(struct task_struct *, const struct sched_attr *); +extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); +extern struct task_struct *idle_task(int cpu); + + + + + + + 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_idle_task(const struct task_struct *p) +{ + return !!(p->flags & 0x00000002); +} + +extern struct task_struct *curr_task(int cpu); +extern void ia64_set_curr_task(int cpu, struct task_struct *p); + +void yield(void); + +union thread_union { + + struct task_struct task; + + + + + unsigned long stack[(((1UL) << 12) << (2 + 1))/sizeof(long)]; +}; + + + + + +extern unsigned long init_stack[(((1UL) << 12) << (2 + 1)) / sizeof(unsigned long)]; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct thread_info *task_thread_info(struct task_struct *task) +{ + return &task->thread_info; +} +# 1712 "./include/linux/sched.h" +extern struct task_struct *find_task_by_vpid(pid_t nr); +extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); + + + + +extern struct task_struct *find_get_task_by_vpid(pid_t nr); + +extern int wake_up_state(struct task_struct *tsk, unsigned int state); +extern int wake_up_process(struct task_struct *tsk); +extern void wake_up_new_task(struct task_struct *tsk); + + +extern void kick_process(struct task_struct *tsk); + + + + +extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_task_comm(struct task_struct *tsk, const char *from) +{ + __set_task_comm(tsk, from, false); +} + +extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void scheduler_ipi(void) +{ + + + + + + do { if (test_ti_thread_flag(((struct thread_info *)get_current()), 3)) set_preempt_need_resched(); } while (0); +} +extern unsigned long wait_task_inactive(struct task_struct *, long match_state); +# 1766 "./include/linux/sched.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + set_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + clear_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_tsk_thread_flag(struct task_struct *tsk, int flag, + bool value) +{ + update_ti_thread_flag(task_thread_info(tsk), flag, value); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_tsk_thread_flag(struct task_struct *tsk, int flag) +{ + return 
test_ti_thread_flag(task_thread_info(tsk), flag); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_tsk_need_resched(struct task_struct *tsk) +{ + set_tsk_thread_flag(tsk,3); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_tsk_need_resched(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk,3); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_tsk_need_resched(struct task_struct *tsk) +{ + return __builtin_expect(!!(test_tsk_thread_flag(tsk,3)), 0); +} +# 1819 "./include/linux/sched.h" +extern int _cond_resched(void); +# 1829 "./include/linux/sched.h" +extern int __cond_resched_lock(spinlock_t *lock); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cond_resched_rcu(void) +{ + + rcu_read_unlock(); + ({ ___might_sleep("include/linux/sched.h", 1840, 0); _cond_resched(); }); + rcu_read_lock(); + +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int spin_needbreak(spinlock_t *lock) +{ + + + + return 0; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool need_resched(void) +{ + return __builtin_expect(!!(test_ti_thread_flag(((struct thread_info *)get_current()), 3)), 0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int task_cpu(const struct task_struct *p) +{ + + return ({ do { extern void __compiletime_assert_841(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(p->cpu) == sizeof(char) || sizeof(p->cpu) == sizeof(short) || sizeof(p->cpu) == sizeof(int) || sizeof(p->cpu) == sizeof(long)) || sizeof(p->cpu) == sizeof(long long))) __compiletime_assert_841(); } while (0); ({ typeof( _Generic((p->cpu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->cpu))) __x = (*(const volatile typeof( _Generic((p->cpu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->cpu))) *)&(p->cpu)); do { } while (0); (typeof(p->cpu))__x; }); }); + + + +} + +extern void set_task_cpu(struct task_struct *p, unsigned int cpu); +# 1908 "./include/linux/sched.h" +extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); +extern long sched_getaffinity(pid_t pid, struct cpumask *mask); +# 1921 "./include/linux/sched.h" +enum rseq_event_mask_bits { + RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, + RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, + RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, +}; + +enum 
rseq_event_mask { + RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), + RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), + RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_set_notify_resume(struct task_struct *t) +{ + if (t->rseq) + set_tsk_thread_flag(t, 1); +} + +void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_handle_notify_resume(struct ksignal *ksig, + struct pt_regs *regs) +{ + if (get_current()->rseq) + __rseq_handle_notify_resume(ksig, regs); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_signal_deliver(struct ksignal *ksig, + struct pt_regs *regs) +{ + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); + __set_bit(RSEQ_EVENT_SIGNAL_BIT, &get_current()->rseq_event_mask); + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); + rseq_handle_notify_resume(ksig, regs); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_preempt(struct task_struct *t) +{ + __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); + rseq_set_notify_resume(t); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_migrate(struct task_struct *t) +{ + __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); + rseq_set_notify_resume(t); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_fork(struct task_struct *t, unsigned long clone_flags) +{ + if (clone_flags & 0x00000100) { + t->rseq = ((void *)0); + t->rseq_sig = 0; + t->rseq_event_mask = 0; + } else { + t->rseq = get_current()->rseq; + t->rseq_sig = get_current()->rseq_sig; + t->rseq_event_mask = get_current()->rseq_event_mask; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rseq_execve(struct task_struct *t) +{ + t->rseq = ((void *)0); + t->rseq_sig = 0; + t->rseq_event_mask = 0; +} +# 2023 "./include/linux/sched.h" +void __exit_umh(struct task_struct *tsk); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void exit_umh(struct task_struct *tsk) +{ + if (__builtin_expect(!!(tsk->flags & 0x02000000), 0)) + __exit_umh(tsk); +} + + + +void rseq_syscall(struct pt_regs *regs); +# 2043 "./include/linux/sched.h" +const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq); +char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len); +int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq); + +const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq); +const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); +const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); + +int sched_trace_rq_cpu(struct rq *rq); + +const struct cpumask *sched_trace_rd_span(struct root_domain *rd); +# 8 "./include/linux/sched/signal.h" 2 +# 1 "./include/linux/sched/jobctl.h" 1 + + + + + + +struct task_struct; +# 37 "./include/linux/sched/jobctl.h" +extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); +extern void 
task_clear_jobctl_trapping(struct task_struct *task); +extern void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask); +# 9 "./include/linux/sched/signal.h" 2 +# 1 "./include/linux/sched/task.h" 1 +# 11 "./include/linux/sched/task.h" +# 1 "./include/linux/uaccess.h" 1 +# 11 "./include/linux/uaccess.h" +# 1 "./arch/x86/include/asm/uaccess.h" 1 +# 12 "./arch/x86/include/asm/uaccess.h" +# 1 "./arch/x86/include/asm/smap.h" 1 +# 44 "./arch/x86/include/asm/smap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void clac(void) +{ + + asm volatile ("# ALT: oldnstr\n" "661:\n\t" "" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+20)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" ".byte 0x0f,0x01,0xca" "\n" "665""1" ":\n" ".popsection\n" : : : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void stac(void) +{ + + asm volatile ("# ALT: oldnstr\n" "661:\n\t" "" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+20)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" ".byte 0x0f,0x01,0xcb" "\n" "665""1" ":\n" ".popsection\n" : : : "memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long smap_save(void) +{ + unsigned long flags; + + asm volatile ("# smap_save\n\t" + "# ALT: oldnstr\n" "661:\n\t" "jmp 1f" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+20)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "" "\n" "665""1" ":\n" ".popsection\n" + "pushf; pop %0; " ".byte 0x0f,0x01,0xca" "\n\t" + "1:" + : "=rm" (flags) : : "memory", "cc"); + + return flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void smap_restore(unsigned long flags) +{ + asm volatile ("# smap_restore\n\t" + "# ALT: oldnstr\n" "661:\n\t" "jmp 1f" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 9*32+20)" 
"\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "" "\n" "665""1" ":\n" ".popsection\n" + "push %0; popf\n\t" + "1:" + : : "g" (flags) : "memory", "cc"); +} +# 13 "./arch/x86/include/asm/uaccess.h" 2 +# 1 "./arch/x86/include/asm/extable.h" 1 +# 17 "./arch/x86/include/asm/extable.h" +struct exception_table_entry { + int insn, fixup, handler; +}; +struct pt_regs; +# 32 "./arch/x86/include/asm/extable.h" +extern int fixup_exception(struct pt_regs *regs, int trapnr, + unsigned long error_code, unsigned long fault_addr); +extern int fixup_bug(struct pt_regs *regs, int trapnr); +extern bool ex_has_fault_handler(unsigned long ip); +extern void early_fixup_exception(struct pt_regs *regs, int trapnr); +# 14 "./arch/x86/include/asm/uaccess.h" 2 +# 29 "./arch/x86/include/asm/uaccess.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_fs(mm_segment_t fs) +{ + get_current()->thread.addr_limit = fs; + + set_ti_thread_flag(((struct thread_info *)get_current()), 31); +} +# 43 "./arch/x86/include/asm/uaccess.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit) +{ + + + + + + + + if (__builtin_constant_p(size)) + return __builtin_expect(!!(addr > limit - size), 0); + + + addr += size; + if (__builtin_expect(!!(addr < size), 0)) + return true; + return __builtin_expect(!!(addr > limit), 0); +} +# 69 "./arch/x86/include/asm/uaccess.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pagefault_disabled(void); +# 114 "./arch/x86/include/asm/uaccess.h" +extern int __get_user_1(void); +extern int __get_user_2(void); +extern int __get_user_4(void); +extern int __get_user_8(void); +extern int __get_user_bad(void); +# 211 "./arch/x86/include/asm/uaccess.h" +extern void __put_user_bad(void); + + + + + +extern void __put_user_1(void); +extern void __put_user_2(void); +extern void __put_user_4(void); +extern void __put_user_8(void); +# 381 "./arch/x86/include/asm/uaccess.h" +struct __large_struct { unsigned long buf[100]; }; +# 445 "./arch/x86/include/asm/uaccess.h" +extern unsigned long +copy_from_user_nmi(void *to, const void *from, unsigned long n); +extern __attribute__((__warn_unused_result__)) long +strncpy_from_user(char *dst, const char *src, long count); + +extern __attribute__((__warn_unused_result__)) long strnlen_user(const char *str, long n); + +unsigned long __attribute__((__warn_unused_result__)) clear_user(void *mem, unsigned long len); +unsigned long __attribute__((__warn_unused_result__)) __clear_user(void *mem, unsigned long len); +# 469 "./arch/x86/include/asm/uaccess.h" +# 1 "./arch/x86/include/asm/uaccess_64.h" 1 +# 20 "./arch/x86/include/asm/uaccess_64.h" +__attribute__((__warn_unused_result__)) unsigned long +copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); +__attribute__((__warn_unused_result__)) unsigned long +copy_user_generic_string(void *to, const void *from, unsigned len); +__attribute__((__warn_unused_result__)) unsigned long +copy_user_generic_unrolled(void *to, const void *from, unsigned len); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
__attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long +copy_user_generic(void *to, const void *from, unsigned len) +{ + unsigned ret; + + + + + + + asm volatile ("# ALT: oldinstr2\n" "661:\n\t" "call %P[old]" "\n662:\n" "# ALT: padding2\n" ".skip -((" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")) > 0) * " "(" "((" "665""1""f-""664""1""f" ") ^ (((" "665""1""f-""664""1""f" ") ^ (" "665""2""f-""664""2""f" ")) & -(-((" "665""1""f-""664""1""f" ") < (" "665""2""f-""664""2""f" ")))))" " - (" "662b-661b" ")), 0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 3*32+16)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" " .long 661b - .\n" " .long " "664""2""f - .\n" " .word " "( 9*32+ 9)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""2""f-""664""2""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "call %P[new1]" "\n" "665""1" ":\n" "# ALT: replacement " "2" "\n" "664""2"":\n\t" "call %P[new2]" "\n" "665""2" ":\n" ".popsection\n" : "=a" (ret), "=D" (to), "=S" (from), "=d" (len), "+r" (current_stack_pointer) : [old] "i" (copy_user_generic_unrolled), [new1] "i" (copy_user_generic_string), [new2] "i" (copy_user_enhanced_fast_string), "1" (to), "2" (from), "3" (len) : "memory", "rcx", "r8", "r9", "r10", "r11") + + + + + + + + ; + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long +copy_to_user_mcsafe(void *to, const void *from, unsigned len) +{ + unsigned long ret; + + stac(); + + + + + + ret = __memcpy_mcsafe(to, from, len); + clac(); + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long +raw_copy_from_user(void *dst, const void *src, unsigned long size) +{ + return copy_user_generic(dst, ( void *)src, size); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long +raw_copy_to_user(void *dst, const void *src, unsigned long size) +{ + return copy_user_generic(( void *)dst, src, size); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) +unsigned long raw_copy_in_user(void *dst, const void *src, unsigned long size) +{ + return copy_user_generic(( void *)dst, + ( void *)src, size); +} + +extern long __copy_user_nocache(void *dst, const void *src, + unsigned size, int zerorest); + +extern long __copy_user_flushcache(void *dst, const void *src, unsigned size); +extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset, + size_t len); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +__copy_from_user_inatomic_nocache(void *dst, const void *src, + unsigned size) +{ + __kasan_check_write(dst, 
size); + return __copy_user_nocache(dst, src, size, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +__copy_from_user_flushcache(void *dst, const void *src, unsigned size) +{ + __kasan_check_write(dst, size); + return __copy_user_flushcache(dst, src, size); +} + +unsigned long +mcsafe_handle_tail(char *to, char *from, unsigned len); +# 470 "./arch/x86/include/asm/uaccess.h" 2 +# 478 "./arch/x86/include/asm/uaccess.h" +static __attribute__((__warn_unused_result__)) inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool user_access_begin(const void *ptr, size_t len) +{ + if (__builtin_expect(!!(!({ ({ int __ret_warn_on = !!(!(!(preempt_count() & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4)) | (((1UL << (4))-1) << ((0 + 8) + 8)) | (1UL << (0 + 8))))) && !pagefault_disabled()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (842)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/uaccess.h"), "i" (480), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (843)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (844)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); __builtin_expect(!!(!({ (void)0; __chk_range_not_ok((unsigned long )(ptr), len, (get_current()->thread.addr_limit.seg)); })), 1); })), 0)) + return 0; + ({ stac(); asm volatile ("# ALT: oldnstr\n" "661:\n\t" "" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 3*32+18)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "lfence" "\n" "665""1" ":\n" ".popsection\n" : : : "memory"); }); + return 1; +} +# 12 "./include/linux/uaccess.h" 2 +# 58 "./include/linux/uaccess.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long +__copy_from_user_inatomic(void *to, const void *from, unsigned long n) +{ + instrument_copy_from_user(to, from, n); + check_object_size(to, n, false); + return raw_copy_from_user(to, from, n); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long +__copy_from_user(void *to, const void *from, unsigned long n) +{ + __might_fault("include/linux/uaccess.h", 69); + instrument_copy_from_user(to, from, n); + check_object_size(to, n, false); + return 
raw_copy_from_user(to, from, n); +} +# 88 "./include/linux/uaccess.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long +__copy_to_user_inatomic(void *to, const void *from, unsigned long n) +{ + instrument_copy_to_user(to, from, n); + check_object_size(from, n, true); + return raw_copy_to_user(to, from, n); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) unsigned long +__copy_to_user(void *to, const void *from, unsigned long n) +{ + __might_fault("include/linux/uaccess.h", 99); + instrument_copy_to_user(to, from, n); + check_object_size(from, n, true); + return raw_copy_to_user(to, from, n); +} +# 120 "./include/linux/uaccess.h" +extern __attribute__((__warn_unused_result__)) unsigned long +_copy_from_user(void *, const void *, unsigned long); +# 136 "./include/linux/uaccess.h" +extern __attribute__((__warn_unused_result__)) unsigned long +_copy_to_user(void *, const void *, unsigned long); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __attribute__((__warn_unused_result__)) +copy_from_user(void *to, const void *from, unsigned long n) +{ + if (__builtin_expect(!!(check_copy_size(to, n, false)), 1)) + n = _copy_from_user(to, from, n); + return n; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __attribute__((__warn_unused_result__)) +copy_to_user(void *to, const void *from, unsigned long n) +{ + if (__builtin_expect(!!(check_copy_size(from, n, true)), 1)) + n = _copy_to_user(to, from, n); + return n; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned long __attribute__((__warn_unused_result__)) +copy_in_user(void *to, const void *from, unsigned long n) +{ + __might_fault("include/linux/uaccess.h", 159); + if (({ ({ int __ret_warn_on = !!(!(!(preempt_count() & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4)) | (((1UL << (4))-1) << ((0 + 8) + 8)) | (1UL << (0 + 8))))) && !pagefault_disabled()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (845)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/uaccess.h"), "i" (160), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (846)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (847)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); __builtin_expect(!!(!({ (void)0; __chk_range_not_ok((unsigned long )(to), n, (get_current()->thread.addr_limit.seg)); })), 1); }) && ({ ({ int __ret_warn_on = 
!!(!(!(preempt_count() & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4)) | (((1UL << (4))-1) << ((0 + 8) + 8)) | (1UL << (0 + 8))))) && !pagefault_disabled()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (848)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/uaccess.h"), "i" (160), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (849)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (850)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); __builtin_expect(!!(!({ (void)0; __chk_range_not_ok((unsigned long )(from), n, (get_current()->thread.addr_limit.seg)); })), 1); })) + n = raw_copy_in_user(to, from, n); + return n; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void pagefault_disabled_inc(void) +{ + get_current()->pagefault_disabled++; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void pagefault_disabled_dec(void) +{ + get_current()->pagefault_disabled--; +} +# 183 "./include/linux/uaccess.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pagefault_disable(void) +{ + pagefault_disabled_inc(); + + + + + __asm__ __volatile__("": : :"memory"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pagefault_enable(void) +{ + + + + + __asm__ __volatile__("": : :"memory"); + pagefault_disabled_dec(); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pagefault_disabled(void) +{ + return get_current()->pagefault_disabled != 0; +} +# 234 "./include/linux/uaccess.h" +extern __attribute__((__warn_unused_result__)) int check_zeroed_user(const void *from, size_t size); +# 283 "./include/linux/uaccess.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) int +copy_struct_from_user(void *dst, size_t ksize, const void *src, + size_t usize) +{ + size_t size = __builtin_choose_expr(((!!(sizeof((typeof(ksize) *)1 == (typeof(usize) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(ksize) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(usize) * 0l)) : (int *)8))))), ((ksize) < (usize) ? (ksize) : (usize)), ({ typeof(ksize) __UNIQUE_ID___x851 = (ksize); typeof(usize) __UNIQUE_ID___y852 = (usize); ((__UNIQUE_ID___x851) < (__UNIQUE_ID___y852) ? (__UNIQUE_ID___x851) : (__UNIQUE_ID___y852)); })); + size_t rest = __builtin_choose_expr(((!!(sizeof((typeof(ksize) *)1 == (typeof(usize) *)1))) && ((sizeof(int) == sizeof(*(8 ? 
((void *)((long)(ksize) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(usize) * 0l)) : (int *)8))))), ((ksize) > (usize) ? (ksize) : (usize)), ({ typeof(ksize) __UNIQUE_ID___x853 = (ksize); typeof(usize) __UNIQUE_ID___y854 = (usize); ((__UNIQUE_ID___x853) > (__UNIQUE_ID___y854) ? (__UNIQUE_ID___x853) : (__UNIQUE_ID___y854)); })) - size; + + + if (usize < ksize) { + memset(dst + size, 0, rest); + } else if (usize > ksize) { + int ret = check_zeroed_user(src + size, rest); + if (ret <= 0) + return ret ?: -7; + } + + if (copy_from_user(dst, src, size)) + return -14; + return 0; +} + +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); + +long copy_from_kernel_nofault(void *dst, const void *src, size_t size); +long __attribute__((no_instrument_function)) copy_to_kernel_nofault(void *dst, const void *src, size_t size); + +long copy_from_user_nofault(void *dst, const void *src, size_t size); +long __attribute__((no_instrument_function)) copy_to_user_nofault(void *dst, const void *src, + size_t size); + +long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, + long count); + +long strncpy_from_user_nofault(char *dst, const void *unsafe_addr, + long count); +long strnlen_user_nofault(const void *unsafe_addr, long count); +# 352 "./include/linux/uaccess.h" +void usercopy_warn(const char *name, const char *detail, bool to_user, + unsigned long offset, unsigned long len); +void __attribute__((__noreturn__)) usercopy_abort(const char *name, const char *detail, + bool to_user, unsigned long offset, + unsigned long len); +# 12 "./include/linux/sched/task.h" 2 + +struct task_struct; +struct rusage; +union thread_union; +struct css_set; + + + + +struct kernel_clone_args { + u64 flags; + int *pidfd; + int *child_tid; + int *parent_tid; + int exit_signal; + unsigned long stack; + unsigned long stack_size; + unsigned long tls; + pid_t *set_tid; + + size_t set_tid_size; + int cgroup; + struct cgroup *cgrp; + struct css_set *cset; +}; + + + + + + + +extern rwlock_t tasklist_lock; +extern spinlock_t mmlist_lock; + +extern union thread_union init_thread_union; +extern struct task_struct init_task; + + +extern int lockdep_tasklist_lock_is_held(void); + + +extern void schedule_tail(struct task_struct *prev); +extern void init_idle(struct task_struct *idle, int cpu); + +extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_dead(struct task_struct *p); + +void __attribute__((__noreturn__)) do_task_dead(void); + +extern void proc_caches_init(void); + +extern void fork_init(void); + +extern void release_task(struct task_struct * p); + + +extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, + struct task_struct *, unsigned long); +# 84 "./include/linux/sched/task.h" +extern void flush_thread(void); + + +extern void exit_thread(struct task_struct *tsk); + + + + + +extern void do_group_exit(int); + +extern void exit_files(struct task_struct *); +extern void exit_itimers(struct signal_struct *); + +extern long _do_fork(struct kernel_clone_args *kargs); +extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs); +extern long do_fork(unsigned long, unsigned long, unsigned long, int *, int *); +struct task_struct *fork_idle(int); +struct mm_struct *copy_init_mm(void); +extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); +extern long kernel_wait4(pid_t, int *, int, struct rusage *); + +extern void free_task(struct task_struct *tsk); + + + +extern void 
sched_exec(void); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct task_struct *get_task_struct(struct task_struct *t) +{ + refcount_inc(&t->usage); + return t; +} + +extern void __put_task_struct(struct task_struct *t); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_task_struct(struct task_struct *t) +{ + if (refcount_dec_and_test(&t->usage)) + __put_task_struct(t); +} + +void put_task_struct_rcu_user(struct task_struct *task); + + +extern int arch_task_struct_size __attribute__((__section__(".data..read_mostly"))); +# 152 "./include/linux/sched/task.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return t->stack_vm_area; +} +# 173 "./include/linux/sched/task.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_lock(struct task_struct *p) +{ + spin_lock(&p->alloc_lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void task_unlock(struct task_struct *p) +{ + spin_unlock(&p->alloc_lock); +} +# 10 "./include/linux/sched/signal.h" 2 +# 1 "./include/linux/cred.h" 1 +# 13 "./include/linux/cred.h" +# 1 "./include/linux/key.h" 1 +# 17 "./include/linux/key.h" +# 1 "./include/linux/sysctl.h" 1 +# 30 "./include/linux/sysctl.h" +# 1 "./include/uapi/linux/sysctl.h" 1 +# 35 "./include/uapi/linux/sysctl.h" +struct __sysctl_args { + int *name; + int nlen; + void *oldval; + size_t *oldlenp; + void *newval; + size_t newlen; + unsigned long __unused[4]; +}; + + + + + +enum +{ + CTL_KERN=1, + CTL_VM=2, + CTL_NET=3, + CTL_PROC=4, + CTL_FS=5, + CTL_DEBUG=6, + CTL_DEV=7, + CTL_BUS=8, + CTL_ABI=9, + CTL_CPU=10, + CTL_ARLAN=254, + CTL_S390DBF=5677, + CTL_SUNRPC=7249, + CTL_PM=9899, + CTL_FRV=9898, +}; + + +enum +{ + CTL_BUS_ISA=1 +}; + + +enum +{ + INOTIFY_MAX_USER_INSTANCES=1, + INOTIFY_MAX_USER_WATCHES=2, + INOTIFY_MAX_QUEUED_EVENTS=3 +}; + + +enum +{ + KERN_OSTYPE=1, + KERN_OSRELEASE=2, + KERN_OSREV=3, + KERN_VERSION=4, + KERN_SECUREMASK=5, + KERN_PROF=6, + KERN_NODENAME=7, + KERN_DOMAINNAME=8, + + KERN_PANIC=15, + KERN_REALROOTDEV=16, + + KERN_SPARC_REBOOT=21, + KERN_CTLALTDEL=22, + KERN_PRINTK=23, + KERN_NAMETRANS=24, + KERN_PPC_HTABRECLAIM=25, + KERN_PPC_ZEROPAGED=26, + KERN_PPC_POWERSAVE_NAP=27, + KERN_MODPROBE=28, + KERN_SG_BIG_BUFF=29, + KERN_ACCT=30, + KERN_PPC_L2CR=31, + + KERN_RTSIGNR=32, + KERN_RTSIGMAX=33, + + KERN_SHMMAX=34, + KERN_MSGMAX=35, + KERN_MSGMNB=36, + KERN_MSGPOOL=37, + KERN_SYSRQ=38, + KERN_MAX_THREADS=39, + KERN_RANDOM=40, + KERN_SHMALL=41, + KERN_MSGMNI=42, + KERN_SEM=43, + KERN_SPARC_STOP_A=44, + KERN_SHMMNI=45, + KERN_OVERFLOWUID=46, + KERN_OVERFLOWGID=47, + KERN_SHMPATH=48, + KERN_HOTPLUG=49, + KERN_IEEE_EMULATION_WARNINGS=50, + KERN_S390_USER_DEBUG_LOGGING=51, + KERN_CORE_USES_PID=52, + KERN_TAINTED=53, + KERN_CADPID=54, + KERN_PIDMAX=55, + KERN_CORE_PATTERN=56, + KERN_PANIC_ON_OOPS=57, + KERN_HPPA_PWRSW=58, + KERN_HPPA_UNALIGNED=59, + KERN_PRINTK_RATELIMIT=60, + KERN_PRINTK_RATELIMIT_BURST=61, + KERN_PTY=62, + KERN_NGROUPS_MAX=63, + KERN_SPARC_SCONS_PWROFF=64, + KERN_HZ_TIMER=65, + KERN_UNKNOWN_NMI_PANIC=66, + KERN_BOOTLOADER_TYPE=67, + KERN_RANDOMIZE=68, + KERN_SETUID_DUMPABLE=69, + KERN_SPIN_RETRY=70, + KERN_ACPI_VIDEO_FLAGS=71, + 
KERN_IA64_UNALIGNED=72, + KERN_COMPAT_LOG=73, + KERN_MAX_LOCK_DEPTH=74, + KERN_NMI_WATCHDOG=75, + KERN_PANIC_ON_NMI=76, + KERN_PANIC_ON_WARN=77, + KERN_PANIC_PRINT=78, +}; + + + + +enum +{ + VM_UNUSED1=1, + VM_UNUSED2=2, + VM_UNUSED3=3, + VM_UNUSED4=4, + VM_OVERCOMMIT_MEMORY=5, + VM_UNUSED5=6, + VM_UNUSED7=7, + VM_UNUSED8=8, + VM_UNUSED9=9, + VM_PAGE_CLUSTER=10, + VM_DIRTY_BACKGROUND=11, + VM_DIRTY_RATIO=12, + VM_DIRTY_WB_CS=13, + VM_DIRTY_EXPIRE_CS=14, + VM_NR_PDFLUSH_THREADS=15, + VM_OVERCOMMIT_RATIO=16, + VM_PAGEBUF=17, + VM_HUGETLB_PAGES=18, + VM_SWAPPINESS=19, + VM_LOWMEM_RESERVE_RATIO=20, + VM_MIN_FREE_KBYTES=21, + VM_MAX_MAP_COUNT=22, + VM_LAPTOP_MODE=23, + VM_BLOCK_DUMP=24, + VM_HUGETLB_GROUP=25, + VM_VFS_CACHE_PRESSURE=26, + VM_LEGACY_VA_LAYOUT=27, + VM_SWAP_TOKEN_TIMEOUT=28, + VM_DROP_PAGECACHE=29, + VM_PERCPU_PAGELIST_FRACTION=30, + VM_ZONE_RECLAIM_MODE=31, + VM_MIN_UNMAPPED=32, + VM_PANIC_ON_OOM=33, + VM_VDSO_ENABLED=34, + VM_MIN_SLAB=35, +}; + + + +enum +{ + NET_CORE=1, + NET_ETHER=2, + NET_802=3, + NET_UNIX=4, + NET_IPV4=5, + NET_IPX=6, + NET_ATALK=7, + NET_NETROM=8, + NET_AX25=9, + NET_BRIDGE=10, + NET_ROSE=11, + NET_IPV6=12, + NET_X25=13, + NET_TR=14, + NET_DECNET=15, + NET_ECONET=16, + NET_SCTP=17, + NET_LLC=18, + NET_NETFILTER=19, + NET_DCCP=20, + NET_IRDA=412, +}; + + +enum +{ + RANDOM_POOLSIZE=1, + RANDOM_ENTROPY_COUNT=2, + RANDOM_READ_THRESH=3, + RANDOM_WRITE_THRESH=4, + RANDOM_BOOT_ID=5, + RANDOM_UUID=6 +}; + + +enum +{ + PTY_MAX=1, + PTY_NR=2 +}; + + +enum +{ + BUS_ISA_MEM_BASE=1, + BUS_ISA_PORT_BASE=2, + BUS_ISA_PORT_SHIFT=3 +}; + + +enum +{ + NET_CORE_WMEM_MAX=1, + NET_CORE_RMEM_MAX=2, + NET_CORE_WMEM_DEFAULT=3, + NET_CORE_RMEM_DEFAULT=4, + + NET_CORE_MAX_BACKLOG=6, + NET_CORE_FASTROUTE=7, + NET_CORE_MSG_COST=8, + NET_CORE_MSG_BURST=9, + NET_CORE_OPTMEM_MAX=10, + NET_CORE_HOT_LIST_LENGTH=11, + NET_CORE_DIVERT_VERSION=12, + NET_CORE_NO_CONG_THRESH=13, + NET_CORE_NO_CONG=14, + NET_CORE_LO_CONG=15, + NET_CORE_MOD_CONG=16, + NET_CORE_DEV_WEIGHT=17, + NET_CORE_SOMAXCONN=18, + NET_CORE_BUDGET=19, + NET_CORE_AEVENT_ETIME=20, + NET_CORE_AEVENT_RSEQTH=21, + NET_CORE_WARNINGS=22, +}; + + + + + + + +enum +{ + NET_UNIX_DESTROY_DELAY=1, + NET_UNIX_DELETE_DELAY=2, + NET_UNIX_MAX_DGRAM_QLEN=3, +}; + + +enum +{ + NET_NF_CONNTRACK_MAX=1, + NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2, + NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3, + NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4, + NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5, + NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6, + NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7, + NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8, + NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9, + NET_NF_CONNTRACK_UDP_TIMEOUT=10, + NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11, + NET_NF_CONNTRACK_ICMP_TIMEOUT=12, + NET_NF_CONNTRACK_GENERIC_TIMEOUT=13, + NET_NF_CONNTRACK_BUCKETS=14, + NET_NF_CONNTRACK_LOG_INVALID=15, + NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16, + NET_NF_CONNTRACK_TCP_LOOSE=17, + NET_NF_CONNTRACK_TCP_BE_LIBERAL=18, + NET_NF_CONNTRACK_TCP_MAX_RETRANS=19, + NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20, + NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21, + NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22, + NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23, + NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24, + NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, + NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, + NET_NF_CONNTRACK_COUNT=27, + NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28, + NET_NF_CONNTRACK_FRAG6_TIMEOUT=29, + NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30, + NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31, + 
NET_NF_CONNTRACK_CHECKSUM=32, +}; + + +enum +{ + + NET_IPV4_FORWARD=8, + NET_IPV4_DYNADDR=9, + + NET_IPV4_CONF=16, + NET_IPV4_NEIGH=17, + NET_IPV4_ROUTE=18, + NET_IPV4_FIB_HASH=19, + NET_IPV4_NETFILTER=20, + + NET_IPV4_TCP_TIMESTAMPS=33, + NET_IPV4_TCP_WINDOW_SCALING=34, + NET_IPV4_TCP_SACK=35, + NET_IPV4_TCP_RETRANS_COLLAPSE=36, + NET_IPV4_DEFAULT_TTL=37, + NET_IPV4_AUTOCONFIG=38, + NET_IPV4_NO_PMTU_DISC=39, + NET_IPV4_TCP_SYN_RETRIES=40, + NET_IPV4_IPFRAG_HIGH_THRESH=41, + NET_IPV4_IPFRAG_LOW_THRESH=42, + NET_IPV4_IPFRAG_TIME=43, + NET_IPV4_TCP_MAX_KA_PROBES=44, + NET_IPV4_TCP_KEEPALIVE_TIME=45, + NET_IPV4_TCP_KEEPALIVE_PROBES=46, + NET_IPV4_TCP_RETRIES1=47, + NET_IPV4_TCP_RETRIES2=48, + NET_IPV4_TCP_FIN_TIMEOUT=49, + NET_IPV4_IP_MASQ_DEBUG=50, + NET_TCP_SYNCOOKIES=51, + NET_TCP_STDURG=52, + NET_TCP_RFC1337=53, + NET_TCP_SYN_TAILDROP=54, + NET_TCP_MAX_SYN_BACKLOG=55, + NET_IPV4_LOCAL_PORT_RANGE=56, + NET_IPV4_ICMP_ECHO_IGNORE_ALL=57, + NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58, + NET_IPV4_ICMP_SOURCEQUENCH_RATE=59, + NET_IPV4_ICMP_DESTUNREACH_RATE=60, + NET_IPV4_ICMP_TIMEEXCEED_RATE=61, + NET_IPV4_ICMP_PARAMPROB_RATE=62, + NET_IPV4_ICMP_ECHOREPLY_RATE=63, + NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64, + NET_IPV4_IGMP_MAX_MEMBERSHIPS=65, + NET_TCP_TW_RECYCLE=66, + NET_IPV4_ALWAYS_DEFRAG=67, + NET_IPV4_TCP_KEEPALIVE_INTVL=68, + NET_IPV4_INET_PEER_THRESHOLD=69, + NET_IPV4_INET_PEER_MINTTL=70, + NET_IPV4_INET_PEER_MAXTTL=71, + NET_IPV4_INET_PEER_GC_MINTIME=72, + NET_IPV4_INET_PEER_GC_MAXTIME=73, + NET_TCP_ORPHAN_RETRIES=74, + NET_TCP_ABORT_ON_OVERFLOW=75, + NET_TCP_SYNACK_RETRIES=76, + NET_TCP_MAX_ORPHANS=77, + NET_TCP_MAX_TW_BUCKETS=78, + NET_TCP_FACK=79, + NET_TCP_REORDERING=80, + NET_TCP_ECN=81, + NET_TCP_DSACK=82, + NET_TCP_MEM=83, + NET_TCP_WMEM=84, + NET_TCP_RMEM=85, + NET_TCP_APP_WIN=86, + NET_TCP_ADV_WIN_SCALE=87, + NET_IPV4_NONLOCAL_BIND=88, + NET_IPV4_ICMP_RATELIMIT=89, + NET_IPV4_ICMP_RATEMASK=90, + NET_TCP_TW_REUSE=91, + NET_TCP_FRTO=92, + NET_TCP_LOW_LATENCY=93, + NET_IPV4_IPFRAG_SECRET_INTERVAL=94, + NET_IPV4_IGMP_MAX_MSF=96, + NET_TCP_NO_METRICS_SAVE=97, + NET_TCP_DEFAULT_WIN_SCALE=105, + NET_TCP_MODERATE_RCVBUF=106, + NET_TCP_TSO_WIN_DIVISOR=107, + NET_TCP_BIC_BETA=108, + NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109, + NET_TCP_CONG_CONTROL=110, + NET_TCP_ABC=111, + NET_IPV4_IPFRAG_MAX_DIST=112, + NET_TCP_MTU_PROBING=113, + NET_TCP_BASE_MSS=114, + NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115, + NET_TCP_DMA_COPYBREAK=116, + NET_TCP_SLOW_START_AFTER_IDLE=117, + NET_CIPSOV4_CACHE_ENABLE=118, + NET_CIPSOV4_CACHE_BUCKET_SIZE=119, + NET_CIPSOV4_RBM_OPTFMT=120, + NET_CIPSOV4_RBM_STRICTVALID=121, + NET_TCP_AVAIL_CONG_CONTROL=122, + NET_TCP_ALLOWED_CONG_CONTROL=123, + NET_TCP_MAX_SSTHRESH=124, + NET_TCP_FRTO_RESPONSE=125, +}; + +enum { + NET_IPV4_ROUTE_FLUSH=1, + NET_IPV4_ROUTE_MIN_DELAY=2, + NET_IPV4_ROUTE_MAX_DELAY=3, + NET_IPV4_ROUTE_GC_THRESH=4, + NET_IPV4_ROUTE_MAX_SIZE=5, + NET_IPV4_ROUTE_GC_MIN_INTERVAL=6, + NET_IPV4_ROUTE_GC_TIMEOUT=7, + NET_IPV4_ROUTE_GC_INTERVAL=8, + NET_IPV4_ROUTE_REDIRECT_LOAD=9, + NET_IPV4_ROUTE_REDIRECT_NUMBER=10, + NET_IPV4_ROUTE_REDIRECT_SILENCE=11, + NET_IPV4_ROUTE_ERROR_COST=12, + NET_IPV4_ROUTE_ERROR_BURST=13, + NET_IPV4_ROUTE_GC_ELASTICITY=14, + NET_IPV4_ROUTE_MTU_EXPIRES=15, + NET_IPV4_ROUTE_MIN_PMTU=16, + NET_IPV4_ROUTE_MIN_ADVMSS=17, + NET_IPV4_ROUTE_SECRET_INTERVAL=18, + NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19, +}; + +enum +{ + NET_PROTO_CONF_ALL=-2, + NET_PROTO_CONF_DEFAULT=-3 + + +}; + +enum +{ + NET_IPV4_CONF_FORWARDING=1, + 
NET_IPV4_CONF_MC_FORWARDING=2, + NET_IPV4_CONF_PROXY_ARP=3, + NET_IPV4_CONF_ACCEPT_REDIRECTS=4, + NET_IPV4_CONF_SECURE_REDIRECTS=5, + NET_IPV4_CONF_SEND_REDIRECTS=6, + NET_IPV4_CONF_SHARED_MEDIA=7, + NET_IPV4_CONF_RP_FILTER=8, + NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9, + NET_IPV4_CONF_BOOTP_RELAY=10, + NET_IPV4_CONF_LOG_MARTIANS=11, + NET_IPV4_CONF_TAG=12, + NET_IPV4_CONF_ARPFILTER=13, + NET_IPV4_CONF_MEDIUM_ID=14, + NET_IPV4_CONF_NOXFRM=15, + NET_IPV4_CONF_NOPOLICY=16, + NET_IPV4_CONF_FORCE_IGMP_VERSION=17, + NET_IPV4_CONF_ARP_ANNOUNCE=18, + NET_IPV4_CONF_ARP_IGNORE=19, + NET_IPV4_CONF_PROMOTE_SECONDARIES=20, + NET_IPV4_CONF_ARP_ACCEPT=21, + NET_IPV4_CONF_ARP_NOTIFY=22, +}; + + +enum +{ + NET_IPV4_NF_CONNTRACK_MAX=1, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9, + NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10, + NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11, + NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12, + NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13, + NET_IPV4_NF_CONNTRACK_BUCKETS=14, + NET_IPV4_NF_CONNTRACK_LOG_INVALID=15, + NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16, + NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17, + NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18, + NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, + NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, + NET_IPV4_NF_CONNTRACK_COUNT=27, + NET_IPV4_NF_CONNTRACK_CHECKSUM=28, +}; + + +enum { + NET_IPV6_CONF=16, + NET_IPV6_NEIGH=17, + NET_IPV6_ROUTE=18, + NET_IPV6_ICMP=19, + NET_IPV6_BINDV6ONLY=20, + NET_IPV6_IP6FRAG_HIGH_THRESH=21, + NET_IPV6_IP6FRAG_LOW_THRESH=22, + NET_IPV6_IP6FRAG_TIME=23, + NET_IPV6_IP6FRAG_SECRET_INTERVAL=24, + NET_IPV6_MLD_MAX_MSF=25, +}; + +enum { + NET_IPV6_ROUTE_FLUSH=1, + NET_IPV6_ROUTE_GC_THRESH=2, + NET_IPV6_ROUTE_MAX_SIZE=3, + NET_IPV6_ROUTE_GC_MIN_INTERVAL=4, + NET_IPV6_ROUTE_GC_TIMEOUT=5, + NET_IPV6_ROUTE_GC_INTERVAL=6, + NET_IPV6_ROUTE_GC_ELASTICITY=7, + NET_IPV6_ROUTE_MTU_EXPIRES=8, + NET_IPV6_ROUTE_MIN_ADVMSS=9, + NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10 +}; + +enum { + NET_IPV6_FORWARDING=1, + NET_IPV6_HOP_LIMIT=2, + NET_IPV6_MTU=3, + NET_IPV6_ACCEPT_RA=4, + NET_IPV6_ACCEPT_REDIRECTS=5, + NET_IPV6_AUTOCONF=6, + NET_IPV6_DAD_TRANSMITS=7, + NET_IPV6_RTR_SOLICITS=8, + NET_IPV6_RTR_SOLICIT_INTERVAL=9, + NET_IPV6_RTR_SOLICIT_DELAY=10, + NET_IPV6_USE_TEMPADDR=11, + NET_IPV6_TEMP_VALID_LFT=12, + NET_IPV6_TEMP_PREFERED_LFT=13, + NET_IPV6_REGEN_MAX_RETRY=14, + NET_IPV6_MAX_DESYNC_FACTOR=15, + NET_IPV6_MAX_ADDRESSES=16, + NET_IPV6_FORCE_MLD_VERSION=17, + NET_IPV6_ACCEPT_RA_DEFRTR=18, + NET_IPV6_ACCEPT_RA_PINFO=19, + NET_IPV6_ACCEPT_RA_RTR_PREF=20, + NET_IPV6_RTR_PROBE_INTERVAL=21, + NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22, + NET_IPV6_PROXY_NDP=23, + NET_IPV6_ACCEPT_SOURCE_ROUTE=25, + NET_IPV6_ACCEPT_RA_FROM_LOCAL=26, + NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27, + __NET_IPV6_MAX +}; + + +enum { + NET_IPV6_ICMP_RATELIMIT = 1, + NET_IPV6_ICMP_ECHO_IGNORE_ALL = 2 +}; + + +enum { + 
NET_NEIGH_MCAST_SOLICIT=1, + NET_NEIGH_UCAST_SOLICIT=2, + NET_NEIGH_APP_SOLICIT=3, + NET_NEIGH_RETRANS_TIME=4, + NET_NEIGH_REACHABLE_TIME=5, + NET_NEIGH_DELAY_PROBE_TIME=6, + NET_NEIGH_GC_STALE_TIME=7, + NET_NEIGH_UNRES_QLEN=8, + NET_NEIGH_PROXY_QLEN=9, + NET_NEIGH_ANYCAST_DELAY=10, + NET_NEIGH_PROXY_DELAY=11, + NET_NEIGH_LOCKTIME=12, + NET_NEIGH_GC_INTERVAL=13, + NET_NEIGH_GC_THRESH1=14, + NET_NEIGH_GC_THRESH2=15, + NET_NEIGH_GC_THRESH3=16, + NET_NEIGH_RETRANS_TIME_MS=17, + NET_NEIGH_REACHABLE_TIME_MS=18, +}; + + +enum { + NET_DCCP_DEFAULT=1, +}; + + +enum { + NET_IPX_PPROP_BROADCASTING=1, + NET_IPX_FORWARDING=2 +}; + + +enum { + NET_LLC2=1, + NET_LLC_STATION=2, +}; + + +enum { + NET_LLC2_TIMEOUT=1, +}; + + +enum { + NET_LLC_STATION_ACK_TIMEOUT=1, +}; + + +enum { + NET_LLC2_ACK_TIMEOUT=1, + NET_LLC2_P_TIMEOUT=2, + NET_LLC2_REJ_TIMEOUT=3, + NET_LLC2_BUSY_TIMEOUT=4, +}; + + +enum { + NET_ATALK_AARP_EXPIRY_TIME=1, + NET_ATALK_AARP_TICK_TIME=2, + NET_ATALK_AARP_RETRANSMIT_LIMIT=3, + NET_ATALK_AARP_RESOLVE_TIME=4 +}; + + + +enum { + NET_NETROM_DEFAULT_PATH_QUALITY=1, + NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2, + NET_NETROM_NETWORK_TTL_INITIALISER=3, + NET_NETROM_TRANSPORT_TIMEOUT=4, + NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5, + NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6, + NET_NETROM_TRANSPORT_BUSY_DELAY=7, + NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8, + NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9, + NET_NETROM_ROUTING_CONTROL=10, + NET_NETROM_LINK_FAILS_COUNT=11, + NET_NETROM_RESET=12 +}; + + +enum { + NET_AX25_IP_DEFAULT_MODE=1, + NET_AX25_DEFAULT_MODE=2, + NET_AX25_BACKOFF_TYPE=3, + NET_AX25_CONNECT_MODE=4, + NET_AX25_STANDARD_WINDOW=5, + NET_AX25_EXTENDED_WINDOW=6, + NET_AX25_T1_TIMEOUT=7, + NET_AX25_T2_TIMEOUT=8, + NET_AX25_T3_TIMEOUT=9, + NET_AX25_IDLE_TIMEOUT=10, + NET_AX25_N2=11, + NET_AX25_PACLEN=12, + NET_AX25_PROTOCOL=13, + NET_AX25_DAMA_SLAVE_TIMEOUT=14 +}; + + +enum { + NET_ROSE_RESTART_REQUEST_TIMEOUT=1, + NET_ROSE_CALL_REQUEST_TIMEOUT=2, + NET_ROSE_RESET_REQUEST_TIMEOUT=3, + NET_ROSE_CLEAR_REQUEST_TIMEOUT=4, + NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5, + NET_ROSE_ROUTING_CONTROL=6, + NET_ROSE_LINK_FAIL_TIMEOUT=7, + NET_ROSE_MAX_VCS=8, + NET_ROSE_WINDOW_SIZE=9, + NET_ROSE_NO_ACTIVITY_TIMEOUT=10 +}; + + +enum { + NET_X25_RESTART_REQUEST_TIMEOUT=1, + NET_X25_CALL_REQUEST_TIMEOUT=2, + NET_X25_RESET_REQUEST_TIMEOUT=3, + NET_X25_CLEAR_REQUEST_TIMEOUT=4, + NET_X25_ACK_HOLD_BACK_TIMEOUT=5, + NET_X25_FORWARD=6 +}; + + +enum +{ + NET_TR_RIF_TIMEOUT=1 +}; + + +enum { + NET_DECNET_NODE_TYPE = 1, + NET_DECNET_NODE_ADDRESS = 2, + NET_DECNET_NODE_NAME = 3, + NET_DECNET_DEFAULT_DEVICE = 4, + NET_DECNET_TIME_WAIT = 5, + NET_DECNET_DN_COUNT = 6, + NET_DECNET_DI_COUNT = 7, + NET_DECNET_DR_COUNT = 8, + NET_DECNET_DST_GC_INTERVAL = 9, + NET_DECNET_CONF = 10, + NET_DECNET_NO_FC_MAX_CWND = 11, + NET_DECNET_MEM = 12, + NET_DECNET_RMEM = 13, + NET_DECNET_WMEM = 14, + NET_DECNET_DEBUG_LEVEL = 255 +}; + + +enum { + NET_DECNET_CONF_LOOPBACK = -2, + NET_DECNET_CONF_DDCMP = -3, + NET_DECNET_CONF_PPP = -4, + NET_DECNET_CONF_X25 = -5, + NET_DECNET_CONF_GRE = -6, + NET_DECNET_CONF_ETHER = -7 + + +}; + + +enum { + NET_DECNET_CONF_DEV_PRIORITY = 1, + NET_DECNET_CONF_DEV_T1 = 2, + NET_DECNET_CONF_DEV_T2 = 3, + NET_DECNET_CONF_DEV_T3 = 4, + NET_DECNET_CONF_DEV_FORWARDING = 5, + NET_DECNET_CONF_DEV_BLKSIZE = 6, + NET_DECNET_CONF_DEV_STATE = 7 +}; + + +enum { + NET_SCTP_RTO_INITIAL = 1, + NET_SCTP_RTO_MIN = 2, + NET_SCTP_RTO_MAX = 3, + NET_SCTP_RTO_ALPHA = 4, + NET_SCTP_RTO_BETA = 5, + NET_SCTP_VALID_COOKIE_LIFE = 6, + 
NET_SCTP_ASSOCIATION_MAX_RETRANS = 7, + NET_SCTP_PATH_MAX_RETRANS = 8, + NET_SCTP_MAX_INIT_RETRANSMITS = 9, + NET_SCTP_HB_INTERVAL = 10, + NET_SCTP_PRESERVE_ENABLE = 11, + NET_SCTP_MAX_BURST = 12, + NET_SCTP_ADDIP_ENABLE = 13, + NET_SCTP_PRSCTP_ENABLE = 14, + NET_SCTP_SNDBUF_POLICY = 15, + NET_SCTP_SACK_TIMEOUT = 16, + NET_SCTP_RCVBUF_POLICY = 17, +}; + + +enum { + NET_BRIDGE_NF_CALL_ARPTABLES = 1, + NET_BRIDGE_NF_CALL_IPTABLES = 2, + NET_BRIDGE_NF_CALL_IP6TABLES = 3, + NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4, + NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, +}; + + + +enum +{ + FS_NRINODE=1, + FS_STATINODE=2, + FS_MAXINODE=3, + FS_NRDQUOT=4, + FS_MAXDQUOT=5, + FS_NRFILE=6, + FS_MAXFILE=7, + FS_DENTRY=8, + FS_NRSUPER=9, + FS_MAXSUPER=10, + FS_OVERFLOWUID=11, + FS_OVERFLOWGID=12, + FS_LEASES=13, + FS_DIR_NOTIFY=14, + FS_LEASE_TIME=15, + FS_DQSTATS=16, + FS_XFS=17, + FS_AIO_NR=18, + FS_AIO_MAX_NR=19, + FS_INOTIFY=20, + FS_OCFS2=988, +}; + + +enum { + FS_DQ_LOOKUPS = 1, + FS_DQ_DROPS = 2, + FS_DQ_READS = 3, + FS_DQ_WRITES = 4, + FS_DQ_CACHE_HITS = 5, + FS_DQ_ALLOCATED = 6, + FS_DQ_FREE = 7, + FS_DQ_SYNCS = 8, + FS_DQ_WARNINGS = 9, +}; + + + + +enum { + DEV_CDROM=1, + DEV_HWMON=2, + DEV_PARPORT=3, + DEV_RAID=4, + DEV_MAC_HID=5, + DEV_SCSI=6, + DEV_IPMI=7, +}; + + +enum { + DEV_CDROM_INFO=1, + DEV_CDROM_AUTOCLOSE=2, + DEV_CDROM_AUTOEJECT=3, + DEV_CDROM_DEBUG=4, + DEV_CDROM_LOCK=5, + DEV_CDROM_CHECK_MEDIA=6 +}; + + +enum { + DEV_PARPORT_DEFAULT=-3 +}; + + +enum { + DEV_RAID_SPEED_LIMIT_MIN=1, + DEV_RAID_SPEED_LIMIT_MAX=2 +}; + + +enum { + DEV_PARPORT_DEFAULT_TIMESLICE=1, + DEV_PARPORT_DEFAULT_SPINTIME=2 +}; + + +enum { + DEV_PARPORT_SPINTIME=1, + DEV_PARPORT_BASE_ADDR=2, + DEV_PARPORT_IRQ=3, + DEV_PARPORT_DMA=4, + DEV_PARPORT_MODES=5, + DEV_PARPORT_DEVICES=6, + DEV_PARPORT_AUTOPROBE=16 +}; + + +enum { + DEV_PARPORT_DEVICES_ACTIVE=-3, +}; + + +enum { + DEV_PARPORT_DEVICE_TIMESLICE=1, +}; + + +enum { + DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1, + DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2, + DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3, + DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4, + DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5, + DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6 +}; + + +enum { + DEV_SCSI_LOGGING_LEVEL=1, +}; + + +enum { + DEV_IPMI_POWEROFF_POWERCYCLE=1, +}; + + +enum +{ + ABI_DEFHANDLER_COFF=1, + ABI_DEFHANDLER_ELF=2, + ABI_DEFHANDLER_LCALL7=3, + ABI_DEFHANDLER_LIBCSO=4, + ABI_TRACE=5, + ABI_FAKE_UTSNAME=6, +}; +# 31 "./include/linux/sysctl.h" 2 + + +struct completion; +struct ctl_table; +struct nsproxy; +struct ctl_table_root; +struct ctl_table_header; +struct ctl_dir; + + + + + + +extern const int sysctl_vals[]; + +typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, + size_t *lenp, loff_t *ppos); + +int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *, + loff_t *); +int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *, + loff_t *); +int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void 
*, + size_t *, loff_t *); +int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_do_static_key(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +# 96 "./include/linux/sysctl.h" +struct ctl_table_poll { + atomic_t event; + wait_queue_head_t wait; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *proc_sys_poll_event(struct ctl_table_poll *poll) +{ + return (void *)(unsigned long)atomic_read(&poll->event); +} +# 114 "./include/linux/sysctl.h" +struct ctl_table { + const char *procname; + void *data; + int maxlen; + umode_t mode; + struct ctl_table *child; + proc_handler *proc_handler; + struct ctl_table_poll *poll; + void *extra1; + void *extra2; +} __attribute__((__designated_init__)); + +struct ctl_node { + struct rb_node node; + struct ctl_table_header *header; +}; + + + +struct ctl_table_header { + union { + struct { + struct ctl_table *ctl_table; + int used; + int count; + int nreg; + }; + struct callback_head rcu; + }; + struct completion *unregistering; + struct ctl_table *ctl_table_arg; + struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_dir *parent; + struct ctl_node *node; + struct hlist_head inodes; +}; + +struct ctl_dir { + + struct ctl_table_header header; + struct rb_root root; +}; + +struct ctl_table_set { + int (*is_seen)(struct ctl_table_set *); + struct ctl_dir dir; +}; + +struct ctl_table_root { + struct ctl_table_set default_set; + struct ctl_table_set *(*lookup)(struct ctl_table_root *root); + void (*set_ownership)(struct ctl_table_header *head, + struct ctl_table *table, + kuid_t *uid, kgid_t *gid); + int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); +}; + + +struct ctl_path { + const char *procname; +}; + + + +void proc_sys_poll_notify(struct ctl_table_poll *poll); + +extern void setup_sysctl_set(struct ctl_table_set *p, + struct ctl_table_root *root, + int (*is_seen)(struct ctl_table_set *)); +extern void retire_sysctl_set(struct ctl_table_set *set); + +struct ctl_table_header *__register_sysctl_table( + struct ctl_table_set *set, + const char *path, struct ctl_table *table); +struct ctl_table_header *__register_sysctl_paths( + struct ctl_table_set *set, + const struct ctl_path *path, struct ctl_table *table); +struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table); +struct ctl_table_header *register_sysctl_table(struct ctl_table * table); +struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, + struct ctl_table *table); + +void unregister_sysctl_table(struct ctl_table_header * table); + +extern int sysctl_init(void); +void do_sysctl_args(void); + +extern int pwrsw_enabled; +extern int unaligned_enabled; +extern int unaligned_dump_stack; +extern int no_unaligned_warning; + +extern struct ctl_table sysctl_mount_point[]; +extern struct ctl_table random_table[]; +extern struct ctl_table firmware_config_table[]; +extern struct ctl_table epoll_table[]; +# 244 "./include/linux/sysctl.h" +int sysctl_max_threads(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +# 18 "./include/linux/key.h" 2 + + +# 1 "./include/linux/assoc_array.h" 1 +# 22 "./include/linux/assoc_array.h" +struct assoc_array { + struct assoc_array_ptr *root; + unsigned long nr_leaves_on_tree; +}; + + + + +struct assoc_array_ops { + + unsigned long (*get_key_chunk)(const void *index_key, int level); + + + unsigned long 
(*get_object_key_chunk)(const void *object, int level); + + + bool (*compare_object)(const void *object, const void *index_key); + + + + + int (*diff_objects)(const void *object, const void *index_key); + + + void (*free_object)(void *object); +}; + + + + +struct assoc_array_edit; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void assoc_array_init(struct assoc_array *array) +{ + array->root = ((void *)0); + array->nr_leaves_on_tree = 0; +} + +extern int assoc_array_iterate(const struct assoc_array *array, + int (*iterator)(const void *object, + void *iterator_data), + void *iterator_data); +extern void *assoc_array_find(const struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern void assoc_array_destroy(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key, + void *object); +extern void assoc_array_insert_set_object(struct assoc_array_edit *edit, + void *object); +extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern void assoc_array_apply_edit(struct assoc_array_edit *edit); +extern void assoc_array_cancel_edit(struct assoc_array_edit *edit); +extern int assoc_array_gc(struct assoc_array *array, + const struct assoc_array_ops *ops, + bool (*iterator)(void *object, void *iterator_data), + void *iterator_data); +# 21 "./include/linux/key.h" 2 + + + + + + + +typedef int32_t key_serial_t; + + +typedef uint32_t key_perm_t; + +struct key; +struct net; +# 77 "./include/linux/key.h" +enum key_need_perm { + KEY_NEED_UNSPECIFIED, + KEY_NEED_VIEW, + KEY_NEED_READ, + KEY_NEED_WRITE, + KEY_NEED_SEARCH, + KEY_NEED_LINK, + KEY_NEED_SETATTR, + KEY_NEED_UNLINK, + KEY_SYSADMIN_OVERRIDE, + KEY_AUTHTOKEN_OVERRIDE, + KEY_DEFER_PERM_CHECK, +}; + +struct seq_file; +struct user_struct; +struct signal_struct; +struct cred; + +struct key_type; +struct key_owner; +struct key_tag; +struct keyring_list; +struct keyring_name; + +struct key_tag { + struct callback_head rcu; + refcount_t usage; + bool removed; +}; + +struct keyring_index_key { + + unsigned long hash; + union { + struct { + + u16 desc_len; + char desc[sizeof(long) - 2]; + + + + + }; + unsigned long x; + }; + struct key_type *type; + struct key_tag *domain_tag; + const char *description; +}; + +union key_payload { + void *rcu_data0; + void *data[4]; +}; +# 147 "./include/linux/key.h" +typedef struct __key_reference_with_attributes *key_ref_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) key_ref_t make_key_ref(const struct key *key, + bool possession) +{ + return (key_ref_t) ((unsigned long) key | possession); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct key *key_ref_to_ptr(const key_ref_t key_ref) +{ + return (struct key *) ((unsigned long) key_ref & ~1UL); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_key_possessed(const key_ref_t key_ref) +{ + return (unsigned long) key_ref & 1UL; +} + +typedef int (*key_restrict_link_func_t)(struct key *dest_keyring, + 
const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); + +struct key_restriction { + key_restrict_link_func_t check; + struct key *key; + struct key_type *keytype; +}; + +enum key_state { + KEY_IS_UNINSTANTIATED, + KEY_IS_POSITIVE, +}; +# 189 "./include/linux/key.h" +struct key { + refcount_t usage; + key_serial_t serial; + union { + struct list_head graveyard_link; + struct rb_node serial_node; + }; + + struct watch_list *watchers; + + struct rw_semaphore sem; + struct key_user *user; + void *security; + union { + time64_t expiry; + time64_t revoked_at; + }; + time64_t last_used_at; + kuid_t uid; + kgid_t gid; + key_perm_t perm; + unsigned short quotalen; + unsigned short datalen; + + + + short state; + + + + + + + unsigned long flags; +# 239 "./include/linux/key.h" + union { + struct keyring_index_key index_key; + struct { + unsigned long hash; + unsigned long len_desc; + struct key_type *type; + struct key_tag *domain_tag; + char *description; + }; + }; + + + + + + union { + union key_payload payload; + struct { + + struct list_head name_link; + struct assoc_array keys; + }; + }; +# 274 "./include/linux/key.h" + struct key_restriction *restrict_link; +}; + +extern struct key *key_alloc(struct key_type *type, + const char *desc, + kuid_t uid, kgid_t gid, + const struct cred *cred, + key_perm_t perm, + unsigned long flags, + struct key_restriction *restrict_link); +# 293 "./include/linux/key.h" +extern void key_revoke(struct key *key); +extern void key_invalidate(struct key *key); +extern void key_put(struct key *key); +extern bool key_put_tag(struct key_tag *tag); +extern void key_remove_domain(struct key_tag *domain_tag); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct key *__key_get(struct key *key) +{ + refcount_inc(&key->usage); + return key; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct key *key_get(struct key *key) +{ + return key ? 
__key_get(key) : key; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void key_ref_put(key_ref_t key_ref) +{ + key_put(key_ref_to_ptr(key_ref)); +} + +extern struct key *request_key_tag(struct key_type *type, + const char *description, + struct key_tag *domain_tag, + const char *callout_info); + +extern struct key *request_key_rcu(struct key_type *type, + const char *description, + struct key_tag *domain_tag); + +extern struct key *request_key_with_auxdata(struct key_type *type, + const char *description, + struct key_tag *domain_tag, + const void *callout_info, + size_t callout_len, + void *aux); +# 339 "./include/linux/key.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct key *request_key(struct key_type *type, + const char *description, + const char *callout_info) +{ + return request_key_tag(type, description, ((void *)0), callout_info); +} +# 378 "./include/linux/key.h" +extern int wait_for_key_construction(struct key *key, bool intr); + +extern int key_validate(const struct key *key); + +extern key_ref_t key_create_or_update(key_ref_t keyring, + const char *type, + const char *description, + const void *payload, + size_t plen, + key_perm_t perm, + unsigned long flags); + +extern int key_update(key_ref_t key, + const void *payload, + size_t plen); + +extern int key_link(struct key *keyring, + struct key *key); + +extern int key_move(struct key *key, + struct key *from_keyring, + struct key *to_keyring, + unsigned int flags); + +extern int key_unlink(struct key *keyring, + struct key *key); + +extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, + const struct cred *cred, + key_perm_t perm, + unsigned long flags, + struct key_restriction *restrict_link, + struct key *dest); + +extern int restrict_link_reject(struct key *keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restriction_key); + +extern int keyring_clear(struct key *keyring); + +extern key_ref_t keyring_search(key_ref_t keyring, + struct key_type *type, + const char *description, + bool recurse); + +extern int keyring_add_key(struct key *keyring, + struct key *key); + +extern int keyring_restrict(key_ref_t keyring, const char *type, + const char *restriction); + +extern struct key *key_lookup(key_serial_t id); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) key_serial_t key_serial(const struct key *key) +{ + return key ? 
key->serial : 0; +} + +extern void key_set_timeout(struct key *, unsigned); + +extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, + enum key_need_perm need_perm); +extern void key_free_user_ns(struct user_namespace *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) short key_read_state(const struct key *key) +{ + + return ({ typeof(*&key->state) ___p1 = ({ do { extern void __compiletime_assert_855(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&key->state) == sizeof(char) || sizeof(*&key->state) == sizeof(short) || sizeof(*&key->state) == sizeof(int) || sizeof(*&key->state) == sizeof(long)) || sizeof(*&key->state) == sizeof(long long))) __compiletime_assert_855(); } while (0); ({ typeof( _Generic((*&key->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&key->state))) __x = (*(const volatile typeof( _Generic((*&key->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&key->state))) *)&(*&key->state)); do { } while (0); (typeof(*&key->state))__x; }); }); do { extern void __compiletime_assert_856(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&key->state) == sizeof(char) || sizeof(*&key->state) == sizeof(short) || sizeof(*&key->state) == sizeof(int) || sizeof(*&key->state) == sizeof(long)))) __compiletime_assert_856(); } while (0); __asm__ __volatile__("": : :"memory"); ___p1; }); +} +# 456 "./include/linux/key.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool key_is_positive(const struct key *key) +{ + return key_read_state(key) == KEY_IS_POSITIVE; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool key_is_negative(const struct key *key) +{ + return key_read_state(key) < 0; +} +# 479 "./include/linux/key.h" +extern struct ctl_table key_sysctls[]; + + + + +extern int install_thread_keyring_to_cred(struct cred *cred); +extern void key_fsuid_changed(struct cred *new_cred); +extern void key_fsgid_changed(struct cred *new_cred); +extern void key_init(void); +# 14 "./include/linux/cred.h" 2 + + + +# 1 "./include/linux/sched/user.h" 1 + + + + + + + +# 1 "./include/linux/ratelimit.h" 1 +# 15 "./include/linux/ratelimit.h" +struct ratelimit_state { + raw_spinlock_t lock; + + int interval; + int burst; + int printed; + int missed; + unsigned long begin; + unsigned long flags; +}; +# 40 "./include/linux/ratelimit.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ratelimit_state_init(struct ratelimit_state *rs, + int interval, int burst) +{ + memset(rs, 0, sizeof(*rs)); + + do { static struct lock_class_key __key; __raw_spin_lock_init((&rs->lock), "&rs->lock", &__key, 
LD_WAIT_SPIN); } while (0); + rs->interval = interval; + rs->burst = burst; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ratelimit_default_init(struct ratelimit_state *rs) +{ + return ratelimit_state_init(rs, (5 * 250), + 10); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ratelimit_state_exit(struct ratelimit_state *rs) +{ + if (!(rs->flags & ((((1UL))) << (0)))) + return; + + if (rs->missed) { + printk("\001" "4" "%s: %d output lines suppressed due to ratelimiting\n", get_current()->comm, rs->missed) + ; + rs->missed = 0; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags) +{ + rs->flags = flags; +} + +extern struct ratelimit_state printk_ratelimit_state; + +extern int ___ratelimit(struct ratelimit_state *rs, const char *func); +# 9 "./include/linux/sched/user.h" 2 + + + + +struct user_struct { + refcount_t __count; + atomic_t processes; + atomic_t sigpending; + + atomic_t fanotify_listeners; + + + atomic_long_t epoll_watches; + + + + unsigned long mq_bytes; + + unsigned long locked_shm; + unsigned long unix_inflight; + atomic_long_t pipe_bufs; + + + struct hlist_node uidhash_node; + kuid_t uid; + + + + atomic_long_t locked_vm; + + + + struct ratelimit_state ratelimit; +}; + +extern int uids_sysfs_init(void); + +extern struct user_struct *find_user(kuid_t); + +extern struct user_struct root_user; + + + + +extern struct user_struct * alloc_uid(kuid_t); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct user_struct *get_uid(struct user_struct *u) +{ + refcount_inc(&u->__count); + return u; +} +extern void free_uid(struct user_struct *); +# 18 "./include/linux/cred.h" 2 + +struct cred; +struct inode; + + + + +struct group_info { + atomic_t usage; + int ngroups; + kgid_t gid[0]; +} __attribute__((__designated_init__)); +# 40 "./include/linux/cred.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct group_info *get_group_info(struct group_info *gi) +{ + atomic_inc(&gi->usage); + return gi; +} +# 56 "./include/linux/cred.h" +extern struct group_info init_groups; + +extern struct group_info *groups_alloc(int); +extern void groups_free(struct group_info *); + +extern int in_group_p(kgid_t); +extern int in_egroup_p(kgid_t); +extern int groups_search(const struct group_info *, kgid_t); + +extern int set_current_groups(struct group_info *); +extern void set_groups(struct cred *, struct group_info *); +extern bool may_setgroups(void); +extern void groups_sort(struct group_info *); +# 111 "./include/linux/cred.h" +struct cred { + atomic_t usage; + + atomic_t subscribers; + void *put_addr; + unsigned magic; + + + + kuid_t uid; + kgid_t gid; + kuid_t suid; + kgid_t sgid; + kuid_t euid; + kgid_t egid; + kuid_t fsuid; + kgid_t fsgid; + unsigned securebits; + kernel_cap_t cap_inheritable; + kernel_cap_t cap_permitted; + kernel_cap_t cap_effective; + kernel_cap_t cap_bset; + kernel_cap_t cap_ambient; + + unsigned char jit_keyring; + + struct key *session_keyring; + struct key *process_keyring; + struct key *thread_keyring; + struct key *request_key_auth; + + + void *security; + + struct user_struct *user; + struct user_namespace *user_ns; + struct group_info 
*group_info; + + union { + int non_rcu; + struct callback_head rcu; + }; +} __attribute__((__designated_init__)); + +extern void __put_cred(struct cred *); +extern void exit_creds(struct task_struct *); +extern int copy_creds(struct task_struct *, unsigned long); +extern const struct cred *get_task_cred(struct task_struct *); +extern struct cred *cred_alloc_blank(void); +extern struct cred *prepare_creds(void); +extern struct cred *prepare_exec_creds(void); +extern int commit_creds(struct cred *); +extern void abort_creds(struct cred *); +extern const struct cred *override_creds(const struct cred *); +extern void revert_creds(const struct cred *); +extern struct cred *prepare_kernel_cred(struct task_struct *); +extern int change_create_files_as(struct cred *, struct inode *); +extern int set_security_override(struct cred *, u32); +extern int set_security_override_from_ctx(struct cred *, const char *); +extern int set_create_files_as(struct cred *, struct inode *); +extern int cred_fscmp(const struct cred *, const struct cred *); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) cred_init(void); + + + + + +extern void __invalid_creds(const struct cred *, const char *, unsigned); +extern void __validate_process_creds(struct task_struct *, + const char *, unsigned); + +extern bool creds_are_invalid(const struct cred *cred); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __validate_creds(const struct cred *cred, + const char *file, unsigned line) +{ + if (__builtin_expect(!!(creds_are_invalid(cred)), 0)) + __invalid_creds(cred, file, line); +} +# 201 "./include/linux/cred.h" +extern void validate_creds_for_do_exit(struct task_struct *); +# 214 "./include/linux/cred.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool cap_ambient_invariant_ok(const struct cred *cred) +{ + return cap_issubset(cred->cap_ambient, + cap_intersect(cred->cap_permitted, + cred->cap_inheritable)); +} +# 228 "./include/linux/cred.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct cred *get_new_cred(struct cred *cred) +{ + atomic_inc(&cred->usage); + return cred; +} +# 247 "./include/linux/cred.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cred *get_cred(const struct cred *cred) +{ + struct cred *nonconst_cred = (struct cred *) cred; + if (!cred) + return cred; + do { __validate_creds((cred), "include/linux/cred.h", 252); } while(0); + nonconst_cred->non_rcu = 0; + return get_new_cred(nonconst_cred); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct cred *get_cred_rcu(const struct cred *cred) +{ + struct cred *nonconst_cred = (struct cred *) cred; + if (!cred) + return ((void *)0); + if (!atomic_inc_not_zero(&nonconst_cred->usage)) + return ((void *)0); + do { __validate_creds((cred), "include/linux/cred.h", 264); } while(0); + nonconst_cred->non_rcu = 0; + return cred; +} +# 280 "./include/linux/cred.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_cred(const struct cred *_cred) +{ + struct cred *cred = (struct cred *) _cred; + + if (cred) { + do { __validate_creds((cred), 
"include/linux/cred.h", 285); } while(0); + if (atomic_dec_and_test(&(cred)->usage)) + __put_cred(cred); + } +} +# 390 "./include/linux/cred.h" +extern struct user_namespace init_user_ns; +# 11 "./include/linux/sched/signal.h" 2 +# 20 "./include/linux/sched/signal.h" +struct sighand_struct { + spinlock_t siglock; + refcount_t count; + wait_queue_head_t signalfd_wqh; + struct k_sigaction action[64]; +}; + + + + +struct pacct_struct { + int ac_flag; + long ac_exitcode; + unsigned long ac_mem; + u64 ac_utime, ac_stime; + unsigned long ac_minflt, ac_majflt; +}; + +struct cpu_itimer { + u64 expires; + u64 incr; +}; + + + + + +struct task_cputime_atomic { + atomic64_t utime; + atomic64_t stime; + atomic64_t sum_exec_runtime; +}; +# 66 "./include/linux/sched/signal.h" +struct thread_group_cputimer { + struct task_cputime_atomic cputime_atomic; +}; + +struct multiprocess_signals { + sigset_t signal; + struct hlist_node node; +}; +# 82 "./include/linux/sched/signal.h" +struct signal_struct { + refcount_t sigcnt; + atomic_t live; + int nr_threads; + struct list_head thread_head; + + wait_queue_head_t wait_chldexit; + + + struct task_struct *curr_target; + + + struct sigpending shared_pending; + + + struct hlist_head multiprocess; + + + int group_exit_code; + + + + + + int notify_count; + struct task_struct *group_exit_task; + + + int group_stop_count; + unsigned int flags; +# 122 "./include/linux/sched/signal.h" + unsigned int is_child_subreaper:1; + unsigned int has_child_subreaper:1; + + + + + int posix_timer_id; + struct list_head posix_timers; + + + struct hrtimer real_timer; + ktime_t it_real_incr; + + + + + + + struct cpu_itimer it[2]; + + + + + + struct thread_group_cputimer cputimer; + + + + struct posix_cputimers posix_cputimers; + + + struct pid *pids[PIDTYPE_MAX]; + + + + + + struct pid *tty_old_pgrp; + + + int leader; + + struct tty_struct *tty; + + + struct autogroup *autogroup; + + + + + + + + seqlock_t stats_lock; + u64 utime, stime, cutime, cstime; + u64 gtime; + u64 cgtime; + struct prev_cputime prev_cputime; + unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; + unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; + unsigned long inblock, oublock, cinblock, coublock; + unsigned long maxrss, cmaxrss; + struct task_io_accounting ioac; + + + + + + + + unsigned long long sum_sched_runtime; +# 203 "./include/linux/sched/signal.h" + struct rlimit rlim[16]; + + + struct pacct_struct pacct; + + + struct taskstats *stats; + + + unsigned audit_tty; + struct tty_audit_buf *tty_audit_buf; + + + + + + + bool oom_flag_origin; + short oom_score_adj; + short oom_score_adj_min; + + struct mm_struct *oom_mm; + + + struct mutex cred_guard_mutex; + + + + + + struct mutex exec_update_mutex; + + + +} __attribute__((__designated_init__)); +# 258 "./include/linux/sched/signal.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + ({ int __ret_warn_on = !!(sig->flags & (0x00000004|0x00000008)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (857)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" 
("include/linux/sched/signal.h"), "i" (261), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (858)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (859)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + sig->flags = (sig->flags & ~((0x00000010|0x00000020) | 0x00000001 | 0x00000002)) | flags; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int signal_group_exit(const struct signal_struct *sig) +{ + return (sig->flags & 0x00000004) || + (sig->group_exit_task != ((void *)0)); +} + +extern void flush_signals(struct task_struct *); +extern void ignore_signals(struct task_struct *); +extern void flush_signal_handlers(struct task_struct *, int force_default); +extern int dequeue_signal(struct task_struct *task, + sigset_t *mask, kernel_siginfo_t *info); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kernel_dequeue_signal(void) +{ + struct task_struct *task = get_current(); + kernel_siginfo_t __info; + int ret; + + spin_lock_irq(&task->sighand->siglock); + ret = dequeue_signal(task, &task->blocked, &__info); + spin_unlock_irq(&task->sighand->siglock); + + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kernel_signal_stop(void) +{ + spin_lock_irq(&get_current()->sighand->siglock); + if (get_current()->jobctl & (1UL << 16)) + do { unsigned long flags; ({ int __ret_warn_on = !!(!(((0x0100 | 0x0004)) & (0x0004 | 0x0008 | 0x0040 | 0x0080))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (860)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/sched/signal.h"), "i" (295), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (861)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (862)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(&get_current()->pi_lock); } while (0); get_current()->task_state_change = ({ __label__ __here; __here: (unsigned long)&&__here; }); get_current()->state = ((0x0100 | 0x0004)); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&get_current()->pi_lock, flags); } while (0); } while (0); + spin_unlock_irq(&get_current()->sighand->siglock); + + schedule(); +} +# 311 "./include/linux/sched/signal.h" +int force_sig_fault_to_task(int sig, int code, void *addr + + + , struct task_struct *t); +int force_sig_fault(int sig, int code, void *addr + + ); +int send_sig_fault(int sig, int code, void *addr + + + , struct 
task_struct *t); + +int force_sig_mceerr(int code, void *, short); +int send_sig_mceerr(int code, void *, short, struct task_struct *); + +int force_sig_bnderr(void *addr, void *lower, void *upper); +int force_sig_pkuerr(void *addr, u32 pkey); + +int force_sig_ptrace_errno_trap(int errno, void *addr); + +extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); +extern void force_sigsegv(int sig); +extern int force_sig_info(struct kernel_siginfo *); +extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); +extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *, + const struct cred *); +extern int kill_pgrp(struct pid *pid, int sig, int priv); +extern int kill_pid(struct pid *pid, int sig, int priv); +extern __attribute__((__warn_unused_result__)) bool do_notify_parent(struct task_struct *, int); +extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); +extern void force_sig(int); +extern int send_sig(int, struct task_struct *, int); +extern int zap_other_threads(struct task_struct *p); +extern struct sigqueue *sigqueue_alloc(void); +extern void sigqueue_free(struct sigqueue *); +extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type); +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int restart_syscall(void) +{ + set_tsk_thread_flag(get_current(), 2); + return -513; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int signal_pending(struct task_struct *p) +{ + return __builtin_expect(!!(test_tsk_thread_flag(p,2)), 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __fatal_signal_pending(struct task_struct *p) +{ + return __builtin_expect(!!(sigismember(&p->pending.signal, 9)), 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fatal_signal_pending(struct task_struct *p) +{ + return signal_pending(p) && __fatal_signal_pending(p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int signal_pending_state(long state, struct task_struct *p) +{ + if (!(state & (0x0001 | 0x0100))) + return 0; + if (!signal_pending(p)) + return 0; + + return (state & 0x0001) || __fatal_signal_pending(p); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fault_signal_pending(vm_fault_t fault_flags, + struct pt_regs *regs) +{ + return __builtin_expect(!!((fault_flags & VM_FAULT_RETRY) && (fatal_signal_pending(get_current()) || (user_mode(regs) && signal_pending(get_current())))), 0) + + ; +} + + + + + + + +extern void recalc_sigpending_and_wake(struct task_struct *t); +extern void recalc_sigpending(void); +extern void calculate_sigpending(void); + +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? 
0x0100 : 0); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? 0x0008 : 0); +} + +void task_join_group_stop(struct task_struct *task); +# 465 "./include/linux/sched/signal.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_restore_sigmask(void) +{ + get_current()->restore_sigmask = true; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_tsk_restore_sigmask(struct task_struct *task) +{ + task->restore_sigmask = false; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_restore_sigmask(void) +{ + get_current()->restore_sigmask = false; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool test_restore_sigmask(void) +{ + return get_current()->restore_sigmask; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool test_tsk_restore_sigmask(struct task_struct *task) +{ + return task->restore_sigmask; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool test_and_clear_restore_sigmask(void) +{ + if (!get_current()->restore_sigmask) + return false; + get_current()->restore_sigmask = false; + return true; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void restore_saved_sigmask(void) +{ + if (test_and_clear_restore_sigmask()) + __set_current_blocked(&get_current()->saved_sigmask); +} + +extern int set_user_sigmask(const sigset_t *umask, size_t sigsetsize); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void restore_saved_sigmask_unless(bool interrupted) +{ + if (interrupted) + ({ int __ret_warn_on = !!(!test_ti_thread_flag(((struct thread_info *)get_current()), 2)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (863)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/sched/signal.h"), "i" (505), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (864)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (865)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + else + restore_saved_sigmask(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) sigset_t *sigmask_to_save(void) +{ + sigset_t *res = &get_current()->blocked; + if (__builtin_expect(!!(test_restore_sigmask()), 0)) + res = &get_current()->saved_sigmask; + return res; +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) int kill_cad_pid(int sig, int priv) +{ + return kill_pid(cad_pid, sig, priv); +} +# 530 "./include/linux/sched/signal.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int on_sig_stack(unsigned long sp) +{ +# 541 "./include/linux/sched/signal.h" + if (get_current()->sas_ss_flags & (1U << 31)) + return 0; + + + + + + return sp > get_current()->sas_ss_sp && + sp - get_current()->sas_ss_sp <= get_current()->sas_ss_size; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sas_ss_flags(unsigned long sp) +{ + if (!get_current()->sas_ss_size) + return 2; + + return on_sig_stack(sp) ? 1 : 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sas_ss_reset(struct task_struct *p) +{ + p->sas_ss_sp = 0; + p->sas_ss_size = 0; + p->sas_ss_flags = 2; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long sigsp(unsigned long sp, struct ksignal *ksig) +{ + if (__builtin_expect(!!((ksig->ka.sa.sa_flags & 0x08000000u)), 0) && ! sas_ss_flags(sp)) + + + + return get_current()->sas_ss_sp + get_current()->sas_ss_size; + + return sp; +} + +extern void __cleanup_sighand(struct sighand_struct *); +extern void flush_itimer_signals(void); +# 591 "./include/linux/sched/signal.h" +extern bool current_is_single_threaded(void); +# 613 "./include/linux/sched/signal.h" +typedef int (*proc_visitor)(struct task_struct *p, void *data); +void walk_process_tree(struct task_struct *top, proc_visitor, void *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct pid *task_pid_type(struct task_struct *task, enum pid_type type) +{ + struct pid *pid; + if (type == PIDTYPE_PID) + pid = task_pid(task); + else + pid = task->signal->pids[type]; + return pid; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pid *task_tgid(struct task_struct *task) +{ + return task->signal->pids[PIDTYPE_TGID]; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pid *task_pgrp(struct task_struct *task) +{ + return task->signal->pids[PIDTYPE_PGID]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pid *task_session(struct task_struct *task) +{ + return task->signal->pids[PIDTYPE_SID]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_nr_threads(struct task_struct *task) +{ + return task->signal->nr_threads; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +bool same_thread_group(struct task_struct *p1, struct task_struct *p2) +{ + return p1->signal == p2->signal; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct task_struct *next_thread(const struct task_struct *p) +{ + return 
({ void *__mptr = (void *)(({ do { extern void __compiletime_assert_866(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(p->thread_group.next) == sizeof(char) || sizeof(p->thread_group.next) == sizeof(short) || sizeof(p->thread_group.next) == sizeof(int) || sizeof(p->thread_group.next) == sizeof(long)) || sizeof(p->thread_group.next) == sizeof(long long))) __compiletime_assert_866(); } while (0); ({ typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) __x = (*(const volatile typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) *)&(p->thread_group.next)); do { } while (0); (typeof(p->thread_group.next))__x; }); })); do { extern void __compiletime_assert_867(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(({ do { extern void __compiletime_assert_866(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(p->thread_group.next) == sizeof(char) || sizeof(p->thread_group.next) == sizeof(short) || sizeof(p->thread_group.next) == sizeof(int) || sizeof(p->thread_group.next) == sizeof(long)) || sizeof(p->thread_group.next) == sizeof(long long))) __compiletime_assert_866(); } while (0); ({ typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) __x = (*(const volatile typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) *)&(p->thread_group.next)); do { } while (0); (typeof(p->thread_group.next))__x; }); }))), typeof(((struct task_struct *)0)->thread_group)) && !__builtin_types_compatible_p(typeof(*(({ do { extern void __compiletime_assert_866(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(p->thread_group.next) == sizeof(char) || sizeof(p->thread_group.next) == sizeof(short) || sizeof(p->thread_group.next) == sizeof(int) || sizeof(p->thread_group.next) == sizeof(long)) || sizeof(p->thread_group.next) == sizeof(long long))) __compiletime_assert_866(); } while (0); ({ typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned 
char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) __x = (*(const volatile typeof( _Generic((p->thread_group.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (p->thread_group.next))) *)&(p->thread_group.next)); do { } while (0); (typeof(p->thread_group.next))__x; }); }))), typeof(void))))) __compiletime_assert_867(); } while (0); ((struct task_struct *)(__mptr - __builtin_offsetof(struct task_struct, thread_group))); }) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int thread_group_empty(struct task_struct *p) +{ + return list_empty(&p->thread_group); +} + + + + +extern struct sighand_struct *__lock_task_sighand(struct task_struct *task, + unsigned long *flags); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sighand_struct *lock_task_sighand(struct task_struct *task, + unsigned long *flags) +{ + struct sighand_struct *ret; + + ret = __lock_task_sighand(task, flags); + (void)(ret); + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void unlock_task_sighand(struct task_struct *task, + unsigned long *flags) +{ + spin_unlock_irqrestore(&task->sighand->siglock, *flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long task_rlimit(const struct task_struct *task, + unsigned int limit) +{ + return ({ do { extern void __compiletime_assert_868(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(char) || sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(short) || sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(int) || sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(long)) || sizeof(task->signal->rlim[limit].rlim_cur) == sizeof(long long))) __compiletime_assert_868(); } while (0); ({ typeof( _Generic((task->signal->rlim[limit].rlim_cur), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (task->signal->rlim[limit].rlim_cur))) __x = (*(const volatile typeof( _Generic((task->signal->rlim[limit].rlim_cur), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (task->signal->rlim[limit].rlim_cur))) 
*)&(task->signal->rlim[limit].rlim_cur)); do { } while (0); (typeof(task->signal->rlim[limit].rlim_cur))__x; }); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long task_rlimit_max(const struct task_struct *task, + unsigned int limit) +{ + return ({ do { extern void __compiletime_assert_869(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(task->signal->rlim[limit].rlim_max) == sizeof(char) || sizeof(task->signal->rlim[limit].rlim_max) == sizeof(short) || sizeof(task->signal->rlim[limit].rlim_max) == sizeof(int) || sizeof(task->signal->rlim[limit].rlim_max) == sizeof(long)) || sizeof(task->signal->rlim[limit].rlim_max) == sizeof(long long))) __compiletime_assert_869(); } while (0); ({ typeof( _Generic((task->signal->rlim[limit].rlim_max), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (task->signal->rlim[limit].rlim_max))) __x = (*(const volatile typeof( _Generic((task->signal->rlim[limit].rlim_max), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (task->signal->rlim[limit].rlim_max))) *)&(task->signal->rlim[limit].rlim_max)); do { } while (0); (typeof(task->signal->rlim[limit].rlim_max))__x; }); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long rlimit(unsigned int limit) +{ + return task_rlimit(get_current(), limit); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long rlimit_max(unsigned int limit) +{ + return task_rlimit_max(get_current(), limit); +} +# 7 "./include/linux/rcuwait.h" 2 +# 16 "./include/linux/rcuwait.h" +struct rcuwait { + struct task_struct *task; +}; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rcuwait_init(struct rcuwait *w) +{ + w->task = ((void *)0); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rcuwait_active(struct rcuwait *w) +{ + return !!({ typeof(*(w->task)) *_________p1 = (typeof(*(w->task)) *)({ do { extern void __compiletime_assert_870(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((w->task)) == sizeof(char) || sizeof((w->task)) == sizeof(short) || sizeof((w->task)) == sizeof(int) || sizeof((w->task)) == sizeof(long)) || sizeof((w->task)) == sizeof(long long))) __compiletime_assert_870(); } while (0); ({ typeof( _Generic(((w->task)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed 
long long: (signed long long)0, default: ((w->task)))) __x = (*(const volatile typeof( _Generic(((w->task)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((w->task)))) *)&((w->task))); do { } while (0); (typeof((w->task)))__x; }); }); ; ((typeof(*(w->task)) *)(_________p1)); }); +} + +extern int rcuwait_wake_up(struct rcuwait *w); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void prepare_to_rcuwait(struct rcuwait *w) +{ + do { uintptr_t _r_a_p__v = (uintptr_t)(get_current()); ; if (__builtin_constant_p(get_current()) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_871(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((w->task)) == sizeof(char) || sizeof((w->task)) == sizeof(short) || sizeof((w->task)) == sizeof(int) || sizeof((w->task)) == sizeof(long)) || sizeof((w->task)) == sizeof(long long))) __compiletime_assert_871(); } while (0); do { *(volatile typeof((w->task)) *)&((w->task)) = ((typeof(w->task))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_872(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&w->task) == sizeof(char) || sizeof(*&w->task) == sizeof(short) || sizeof(*&w->task) == sizeof(int) || sizeof(*&w->task) == sizeof(long)))) __compiletime_assert_872(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_873(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&w->task) == sizeof(char) || sizeof(*&w->task) == sizeof(short) || sizeof(*&w->task) == sizeof(int) || sizeof(*&w->task) == sizeof(long)) || sizeof(*&w->task) == sizeof(long long))) __compiletime_assert_873(); } while (0); do { *(volatile typeof(*&w->task) *)&(*&w->task) = ((typeof(*((typeof(w->task))_r_a_p__v)) *)((typeof(w->task))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void finish_rcuwait(struct rcuwait *w) +{ + do { uintptr_t _r_a_p__v = (uintptr_t)(((void *)0)); ; if (__builtin_constant_p(((void *)0)) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_874(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((w->task)) == sizeof(char) || sizeof((w->task)) == sizeof(short) || sizeof((w->task)) == sizeof(int) || sizeof((w->task)) == sizeof(long)) || sizeof((w->task)) == sizeof(long long))) __compiletime_assert_874(); } while (0); do { *(volatile typeof((w->task)) *)&((w->task)) = ((typeof(w->task))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_875(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&w->task) == sizeof(char) || sizeof(*&w->task) == sizeof(short) || sizeof(*&w->task) == sizeof(int) || sizeof(*&w->task) == sizeof(long)))) __compiletime_assert_875(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void 
__compiletime_assert_876(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&w->task) == sizeof(char) || sizeof(*&w->task) == sizeof(short) || sizeof(*&w->task) == sizeof(int) || sizeof(*&w->task) == sizeof(long)) || sizeof(*&w->task) == sizeof(long long))) __compiletime_assert_876(); } while (0); do { *(volatile typeof(*&w->task) *)&(*&w->task) = ((typeof(*((typeof(w->task))_r_a_p__v)) *)((typeof(w->task))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + do { ({ int __ret_warn_on = !!(((0x0000) & (0x0004 | 0x0008 | 0x0040 | 0x0080))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (877)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/rcuwait.h"), "i" (53), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (878)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (879)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); get_current()->task_state_change = ({ __label__ __here; __here: (unsigned long)&&__here; }); get_current()->state = (0x0000); } while (0); +} +# 8 "./include/linux/percpu-rwsem.h" 2 + +# 1 "./include/linux/rcu_sync.h" 1 +# 17 "./include/linux/rcu_sync.h" +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + + struct callback_head cb_head; +}; +# 32 "./include/linux/rcu_sync.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rcu_sync_is_idle(struct rcu_sync *rsp) +{ + do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!rcu_read_lock_any_held())) { __warned = true; lockdep_rcu_suspicious("include/linux/rcu_sync.h", 34, "suspicious rcu_sync_is_idle() usage"); } } while (0) + ; + return !({ do { extern void __compiletime_assert_880(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(rsp->gp_state) == sizeof(char) || sizeof(rsp->gp_state) == sizeof(short) || sizeof(rsp->gp_state) == sizeof(int) || sizeof(rsp->gp_state) == sizeof(long)) || sizeof(rsp->gp_state) == sizeof(long long))) __compiletime_assert_880(); } while (0); ({ typeof( _Generic((rsp->gp_state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rsp->gp_state))) __x = (*(const volatile typeof( _Generic((rsp->gp_state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long 
long)0, signed long long: (signed long long)0, default: (rsp->gp_state))) *)&(rsp->gp_state)); do { } while (0); (typeof(rsp->gp_state))__x; }); }); +} + +extern void rcu_sync_init(struct rcu_sync *); +extern void rcu_sync_enter_start(struct rcu_sync *); +extern void rcu_sync_enter(struct rcu_sync *); +extern void rcu_sync_exit(struct rcu_sync *); +extern void rcu_sync_dtor(struct rcu_sync *); +# 10 "./include/linux/percpu-rwsem.h" 2 + + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; + + struct lockdep_map dep_map; + +}; +# 45 "./include/linux/percpu-rwsem.h" +extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_down_read(struct percpu_rw_semaphore *sem) +{ + do { __might_sleep("include/linux/percpu-rwsem.h", 49, 0); do { } while (0); } while (0); + + lock_acquire(&sem->dep_map, 0, 0, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); + + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); +# 62 "./include/linux/percpu-rwsem.h" + if (__builtin_expect(!!(rcu_sync_is_idle(&sem->rss)), 1)) + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sem->read_count)) { case 1: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); + else + __percpu_down_read(sem, false); + + + + + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) +{ + bool ret = true; + + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); + + + + if (__builtin_expect(!!(rcu_sync_is_idle(&sem->rss)), 1)) + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sem->read_count)) { case 1: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); + else + ret = __percpu_down_read(sem, true); + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); + + + + + + if (ret) + lock_acquire(&sem->dep_map, 0, 1, 1, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); + + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_up_read(struct percpu_rw_semaphore *sem) +{ + lock_release(&sem->dep_map, (unsigned long)__builtin_return_address(0)); + + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); + + + + if (__builtin_expect(!!(rcu_sync_is_idle(&sem->rss)), 1)) { + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sem->read_count)) { case 1: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*sem->read_count))(1)) && ((-(typeof(*sem->read_count))(1)) == 1 || (-(typeof(*sem->read_count))(1)) == -1)) ? 
(int)(-(typeof(*sem->read_count))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*sem->read_count))(1)); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*sem->read_count))(1)) && ((-(typeof(*sem->read_count))(1)) == 1 || (-(typeof(*sem->read_count))(1)) == -1)) ? (int)(-(typeof(*sem->read_count))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*sem->read_count))(1)); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*sem->read_count))(1)) && ((-(typeof(*sem->read_count))(1)) == 1 || (-(typeof(*sem->read_count))(1)) == -1)) ? 
(int)(-(typeof(*sem->read_count))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*sem->read_count))(1)); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*sem->read_count))(1)) && ((-(typeof(*sem->read_count))(1)) == 1 || (-(typeof(*sem->read_count))(1)) == -1)) ? (int)(-(typeof(*sem->read_count))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*sem->read_count))(1)); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); + } else { + + + + + asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc"); + + + + + + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(*sem->read_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(*sem->read_count)) { case 1: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = 
(__builtin_constant_p(-(typeof(*sem->read_count))(1)) && ((-(typeof(*sem->read_count))(1)) == 1 || (-(typeof(*sem->read_count))(1)) == -1)) ? (int)(-(typeof(*sem->read_count))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*sem->read_count))(1)); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*sem->read_count))(1)) && ((-(typeof(*sem->read_count))(1)) == 1 || (-(typeof(*sem->read_count))(1)) == -1)) ? (int)(-(typeof(*sem->read_count))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*sem->read_count))(1)); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*sem->read_count))(1)) && ((-(typeof(*sem->read_count))(1)) == 1 || (-(typeof(*sem->read_count))(1)) == -1)) ? 
(int)(-(typeof(*sem->read_count))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*sem->read_count))(1)); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((*sem->read_count)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(*sem->read_count))(1)) && ((-(typeof(*sem->read_count))(1)) == 1 || (-(typeof(*sem->read_count))(1)) == -1)) ? (int)(-(typeof(*sem->read_count))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(*sem->read_count))(1)); (void)pao_tmp__; } switch (sizeof((*sem->read_count))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "qi" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "ri" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((*sem->read_count)) : "re" ((pao_T__)(-(typeof(*sem->read_count))(1)))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); + rcuwait_wake_up(&sem->writer); + } + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); +} + +extern void percpu_down_write(struct percpu_rw_semaphore *); +extern void percpu_up_write(struct percpu_rw_semaphore *); + +extern int __percpu_init_rwsem(struct percpu_rw_semaphore *, + const char *, struct lock_class_key *); + +extern void 
percpu_free_rwsem(struct percpu_rw_semaphore *); +# 141 "./include/linux/percpu-rwsem.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_rwsem_release(struct percpu_rw_semaphore *sem, + bool read, unsigned long ip) +{ + lock_release(&sem->dep_map, ip); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, + bool read, unsigned long ip) +{ + lock_acquire(&sem->dep_map, 0, 1, read, 1, ((void *)0), ip); +} +# 34 "./include/linux/fs.h" 2 + +# 1 "./include/linux/delayed_call.h" 1 +# 10 "./include/linux/delayed_call.h" +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_delayed_call(struct delayed_call *call, + void (*fn)(void *), void *arg) +{ + call->fn = fn; + call->arg = arg; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void do_delayed_call(struct delayed_call *call) +{ + if (call->fn) + call->fn(call->arg); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_delayed_call(struct delayed_call *call) +{ + call->fn = ((void *)0); +} +# 36 "./include/linux/fs.h" 2 +# 1 "./include/linux/uuid.h" 1 +# 11 "./include/linux/uuid.h" +# 1 "./include/uapi/linux/uuid.h" 1 +# 23 "./include/uapi/linux/uuid.h" +typedef struct { + __u8 b[16]; +} guid_t; +# 35 "./include/uapi/linux/uuid.h" +typedef guid_t uuid_le; +# 12 "./include/linux/uuid.h" 2 + + + + +typedef struct { + __u8 b[16]; +} uuid_t; +# 33 "./include/linux/uuid.h" +extern const guid_t guid_null; +extern const uuid_t uuid_null; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool guid_equal(const guid_t *u1, const guid_t *u2) +{ + return memcmp(u1, u2, sizeof(guid_t)) == 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void guid_copy(guid_t *dst, const guid_t *src) +{ + memcpy(dst, src, sizeof(guid_t)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void import_guid(guid_t *dst, const __u8 *src) +{ + memcpy(dst, src, sizeof(guid_t)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void export_guid(__u8 *dst, const guid_t *src) +{ + memcpy(dst, src, sizeof(guid_t)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool guid_is_null(const guid_t *guid) +{ + return guid_equal(guid, &guid_null); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void uuid_copy(uuid_t *dst, const uuid_t *src) +{ + memcpy(dst, src, sizeof(uuid_t)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void import_uuid(uuid_t *dst, const __u8 *src) +{ + memcpy(dst, src, 
sizeof(uuid_t)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void export_uuid(__u8 *dst, const uuid_t *src) +{ + memcpy(dst, src, sizeof(uuid_t)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool uuid_is_null(const uuid_t *uuid) +{ + return uuid_equal(uuid, &uuid_null); +} + +void generate_random_uuid(unsigned char uuid[16]); +void generate_random_guid(unsigned char guid[16]); + +extern void guid_gen(guid_t *u); +extern void uuid_gen(uuid_t *u); + +bool __attribute__((__warn_unused_result__)) uuid_is_valid(const char *uuid); + +extern const u8 guid_index[16]; +extern const u8 uuid_index[16]; + +int guid_parse(const char *uuid, guid_t *u); +int uuid_parse(const char *uuid, uuid_t *u); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int uuid_le_cmp(const guid_t u1, const guid_t u2) +{ + return memcmp(&u1, &u2, sizeof(guid_t)); +} +# 37 "./include/linux/fs.h" 2 +# 1 "./include/linux/errseq.h" 1 + + + + + + + +typedef u32 errseq_t; + +errseq_t errseq_set(errseq_t *eseq, int err); +errseq_t errseq_sample(errseq_t *eseq); +int errseq_check(errseq_t *eseq, errseq_t since); +int errseq_check_and_advance(errseq_t *eseq, errseq_t *since); +# 38 "./include/linux/fs.h" 2 +# 1 "./include/linux/ioprio.h" 1 + + + + + +# 1 "./include/linux/sched/rt.h" 1 + + + + + + +struct task_struct; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rt_prio(int prio) +{ + if (__builtin_expect(!!(prio < 100), 0)) + return 1; + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rt_task(struct task_struct *p) +{ + return rt_prio(p->prio); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_is_realtime(struct task_struct *tsk) +{ + int policy = tsk->policy; + + if (policy == 1 || policy == 2) + return true; + if (policy == 6) + return true; + return false; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct task_struct *rt_mutex_get_top_task(struct task_struct *p) +{ + return p->pi_top_task; +} +extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); +extern void rt_mutex_adjust_pi(struct task_struct *p); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool tsk_is_pi_blocked(struct task_struct *tsk) +{ + return tsk->pi_blocked_on != ((void *)0); +} +# 58 "./include/linux/sched/rt.h" +extern void normalize_rt_tasks(void); +# 7 "./include/linux/ioprio.h" 2 +# 1 "./include/linux/iocontext.h" 1 +# 9 "./include/linux/iocontext.h" +enum { + ICQ_EXITED = 1 << 2, + ICQ_DESTROYED = 1 << 3, +}; +# 73 "./include/linux/iocontext.h" +struct io_cq { + struct request_queue *q; + struct io_context *ioc; + + + + + + + + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct callback_head __rcu_head; + }; + + unsigned int flags; +}; + + + + + +struct io_context { + atomic_long_t refcount; + atomic_t active_ref; + atomic_t nr_tasks; + + + spinlock_t lock; + + unsigned short ioprio; + + + + + int nr_batch_requests; + unsigned long last_waited; + + 
struct xarray icq_tree; + struct io_cq *icq_hint; + struct hlist_head icq_list; + + struct work_struct release_work; +}; +# 130 "./include/linux/iocontext.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void get_io_context_active(struct io_context *ioc) +{ + ({ int __ret_warn_on = !!(atomic_long_read(&ioc->refcount) <= 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (881)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/iocontext.h"), "i" (132), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (882)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (883)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + ({ int __ret_warn_on = !!(atomic_read(&ioc->active_ref) <= 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (884)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/iocontext.h"), "i" (133), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (885)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (886)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + atomic_long_inc(&ioc->refcount); + atomic_inc(&ioc->active_ref); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ioc_task_link(struct io_context *ioc) +{ + get_io_context_active(ioc); + + ({ int __ret_warn_on = !!(atomic_read(&ioc->nr_tasks) <= 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (887)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/iocontext.h"), "i" (142), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (888)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (889)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); 
}); + atomic_inc(&ioc->nr_tasks); +} + +struct task_struct; + +void put_io_context(struct io_context *ioc); +void put_io_context_active(struct io_context *ioc); +void exit_io_context(struct task_struct *task); +struct io_context *get_task_io_context(struct task_struct *task, + gfp_t gfp_flags, int node); +# 8 "./include/linux/ioprio.h" 2 +# 27 "./include/linux/ioprio.h" +enum { + IOPRIO_CLASS_NONE, + IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, + IOPRIO_CLASS_IDLE, +}; + + + + + + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP, + IOPRIO_WHO_USER, +}; +# 54 "./include/linux/ioprio.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int task_nice_ioprio(struct task_struct *task) +{ + return (task_nice(task) + 20) / 5; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int task_nice_ioclass(struct task_struct *task) +{ + if (task->policy == 5) + return IOPRIO_CLASS_IDLE; + else if (task_is_realtime(task)) + return IOPRIO_CLASS_RT; + else + return IOPRIO_CLASS_BE; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_current_ioprio(void) +{ + struct io_context *ioc = get_current()->io_context; + + if (ioc) + return ioc->ioprio; + return (((IOPRIO_CLASS_NONE) << (13)) | 0); +} + + + + +extern int ioprio_best(unsigned short aprio, unsigned short bprio); + +extern int set_task_ioprio(struct task_struct *task, int ioprio); + + +extern int ioprio_check_cap(int ioprio); +# 39 "./include/linux/fs.h" 2 +# 1 "./include/linux/fs_types.h" 1 +# 71 "./include/linux/fs_types.h" +extern unsigned char fs_ftype_to_dtype(unsigned int filetype); +extern unsigned char fs_umode_to_ftype(umode_t mode); +extern unsigned char fs_umode_to_dtype(umode_t mode); +# 40 "./include/linux/fs.h" 2 + + + + +# 1 "./include/uapi/linux/fs.h" 1 +# 54 "./include/uapi/linux/fs.h" +struct file_clone_range { + __s64 src_fd; + __u64 src_offset; + __u64 src_length; + __u64 dest_offset; +}; + +struct fstrim_range { + __u64 start; + __u64 len; + __u64 minlen; +}; + + + + + + +struct file_dedupe_range_info { + __s64 dest_fd; + __u64 dest_offset; + __u64 bytes_deduped; + + + + + + + __s32 status; + __u32 reserved; +}; + + +struct file_dedupe_range { + __u64 src_offset; + __u64 src_length; + __u16 dest_count; + __u16 reserved1; + __u32 reserved2; + struct file_dedupe_range_info info[0]; +}; + + +struct files_stat_struct { + unsigned long nr_files; + unsigned long nr_free_files; + unsigned long max_files; +}; + +struct inodes_stat_t { + long nr_inodes; + long nr_unused; + long dummy[5]; +}; + + + + + + + +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + __u32 fsx_cowextsize; + unsigned char fsx_pad[8]; +}; +# 286 "./include/uapi/linux/fs.h" +typedef int __kernel_rwf_t; +# 45 "./include/linux/fs.h" 2 + +struct backing_dev_info; +struct bdi_writeback; +struct bio; +struct export_operations; +struct fiemap_extent_info; +struct hd_geometry; +struct iovec; +struct kiocb; +struct kobject; +struct pipe_inode_info; +struct poll_table_struct; +struct kstatfs; +struct vm_area_struct; +struct vfsmount; +struct cred; +struct swap_info_struct; +struct seq_file; +struct workqueue_struct; +struct iov_iter; +struct fscrypt_info; +struct fscrypt_operations; +struct fsverity_info; +struct fsverity_operations; +struct fs_context; +struct fs_parameter_spec; + +extern void 
__attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) inode_init(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) inode_init_early(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) files_init(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) files_maxfiles_init(void); + +extern struct files_stat_struct files_stat; +extern unsigned long get_max_files(void); +extern unsigned int sysctl_nr_open; +extern struct inodes_stat_t inodes_stat; +extern int leases_enable, lease_break_time; +extern int sysctl_protected_symlinks; +extern int sysctl_protected_hardlinks; +extern int sysctl_protected_fifos; +extern int sysctl_protected_regular; + +typedef __kernel_rwf_t rwf_t; + +struct buffer_head; +typedef int (get_block_t)(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create); +typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, + ssize_t bytes, void *private); +# 227 "./include/linux/fs.h" +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + kuid_t ia_uid; + kgid_t ia_gid; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + + + + + + + struct file *ia_file; +}; + + + + +# 1 "./include/linux/quota.h" 1 +# 40 "./include/linux/quota.h" +# 1 "./include/linux/percpu_counter.h" 1 +# 20 "./include/linux/percpu_counter.h" +struct percpu_counter { + raw_spinlock_t lock; + s64 count; + + struct list_head list; + + s32 *counters; +}; + +extern int percpu_counter_batch; + +int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp, + struct lock_class_key *key); +# 41 "./include/linux/percpu_counter.h" +void percpu_counter_destroy(struct percpu_counter *fbc); +void percpu_counter_set(struct percpu_counter *fbc, s64 amount); +void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, + s32 batch); +s64 __percpu_counter_sum(struct percpu_counter *fbc); +int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) +{ + return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_counter_add(struct percpu_counter *fbc, s64 amount) +{ + percpu_counter_add_batch(fbc, amount, percpu_counter_batch); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc) +{ + s64 ret = __percpu_counter_sum(fbc); + return ret < 0 ? 
0 : ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 percpu_counter_sum(struct percpu_counter *fbc) +{ + return __percpu_counter_sum(fbc); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 percpu_counter_read(struct percpu_counter *fbc) +{ + return fbc->count; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 percpu_counter_read_positive(struct percpu_counter *fbc) +{ + + s64 ret = ({ do { extern void __compiletime_assert_890(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(fbc->count) == sizeof(char) || sizeof(fbc->count) == sizeof(short) || sizeof(fbc->count) == sizeof(int) || sizeof(fbc->count) == sizeof(long)) || sizeof(fbc->count) == sizeof(long long))) __compiletime_assert_890(); } while (0); ({ typeof( _Generic((fbc->count), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (fbc->count))) __x = (*(const volatile typeof( _Generic((fbc->count), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (fbc->count))) *)&(fbc->count)); do { } while (0); (typeof(fbc->count))__x; }); }); + + if (ret >= 0) + return ret; + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool percpu_counter_initialized(struct percpu_counter *fbc) +{ + return (fbc->counters != ((void *)0)); +} +# 177 "./include/linux/percpu_counter.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_counter_inc(struct percpu_counter *fbc) +{ + percpu_counter_add(fbc, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_counter_dec(struct percpu_counter *fbc) +{ + percpu_counter_add(fbc, -1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) +{ + percpu_counter_add(fbc, -amount); +} +# 41 "./include/linux/quota.h" 2 + +# 1 "./include/uapi/linux/dqblk_xfs.h" 1 +# 53 "./include/uapi/linux/dqblk_xfs.h" +typedef struct fs_disk_quota { + __s8 d_version; + __s8 d_flags; + __u16 d_fieldmask; + __u32 d_id; + __u64 d_blk_hardlimit; + __u64 d_blk_softlimit; + __u64 d_ino_hardlimit; + __u64 d_ino_softlimit; + __u64 d_bcount; + __u64 d_icount; + __s32 d_itimer; + + __s32 d_btimer; + __u16 d_iwarns; + __u16 d_bwarns; + __s32 d_padding2; + __u64 d_rtb_hardlimit; + __u64 d_rtb_softlimit; + __u64 d_rtbcount; + __s32 d_rtbtimer; + __u16 d_rtbwarns; + __s16 d_padding3; + char d_padding4[8]; +} fs_disk_quota_t; +# 149 "./include/uapi/linux/dqblk_xfs.h" +typedef struct fs_qfilestat { + __u64 qfs_ino; + 
__u64 qfs_nblks; + __u32 qfs_nextents; +} fs_qfilestat_t; + +typedef struct fs_quota_stat { + __s8 qs_version; + __u16 qs_flags; + __s8 qs_pad; + fs_qfilestat_t qs_uquota; + fs_qfilestat_t qs_gquota; + __u32 qs_incoredqs; + __s32 qs_btimelimit; + __s32 qs_itimelimit; + __s32 qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; +} fs_quota_stat_t; +# 192 "./include/uapi/linux/dqblk_xfs.h" +struct fs_qfilestatv { + __u64 qfs_ino; + __u64 qfs_nblks; + __u32 qfs_nextents; + __u32 qfs_pad; +}; + +struct fs_quota_statv { + __s8 qs_version; + __u8 qs_pad1; + __u16 qs_flags; + __u32 qs_incoredqs; + struct fs_qfilestatv qs_uquota; + struct fs_qfilestatv qs_gquota; + struct fs_qfilestatv qs_pquota; + __s32 qs_btimelimit; + __s32 qs_itimelimit; + __s32 qs_rtbtimelimit; + __u16 qs_bwarnlimit; + __u16 qs_iwarnlimit; + __u64 qs_pad2[8]; +}; +# 43 "./include/linux/quota.h" 2 +# 1 "./include/linux/dqblk_v1.h" 1 +# 44 "./include/linux/quota.h" 2 +# 1 "./include/linux/dqblk_v2.h" 1 +# 9 "./include/linux/dqblk_v2.h" +# 1 "./include/linux/dqblk_qtree.h" 1 +# 18 "./include/linux/dqblk_qtree.h" +struct dquot; +struct kqid; + + +struct qtree_fmt_operations { + void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); + void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); + int (*is_id)(void *disk, struct dquot *dquot); +}; + + +struct qtree_mem_dqinfo { + struct super_block *dqi_sb; + int dqi_type; + unsigned int dqi_blocks; + unsigned int dqi_free_blk; + unsigned int dqi_free_entry; + unsigned int dqi_blocksize_bits; + unsigned int dqi_entry_size; + unsigned int dqi_usable_bs; + unsigned int dqi_qtree_depth; + const struct qtree_fmt_operations *dqi_ops; +}; + +int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qtree_depth(struct qtree_mem_dqinfo *info) +{ + unsigned int epb = info->dqi_usable_bs >> 2; + unsigned long long entries = epb; + int i; + + for (i = 1; entries < (1ULL << 32); i++) + entries *= epb; + return i; +} +int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid); +# 10 "./include/linux/dqblk_v2.h" 2 +# 45 "./include/linux/quota.h" 2 + + + +# 1 "./include/linux/projid.h" 1 +# 17 "./include/linux/projid.h" +struct user_namespace; +extern struct user_namespace init_user_ns; + +typedef __kernel_uid32_t projid_t; + +typedef struct { + projid_t val; +} kprojid_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) projid_t __kprojid_val(kprojid_t projid) +{ + return projid.val; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool projid_eq(kprojid_t left, kprojid_t right) +{ + return __kprojid_val(left) == __kprojid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool projid_lt(kprojid_t left, kprojid_t right) +{ + return __kprojid_val(left) < __kprojid_val(right); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool projid_valid(kprojid_t 
projid) +{ + return !projid_eq(projid, (kprojid_t){ -1 }); +} + + + +extern kprojid_t make_kprojid(struct user_namespace *from, projid_t projid); + +extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid); +extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid) +{ + return from_kprojid(ns, projid) != (projid_t)-1; +} +# 49 "./include/linux/quota.h" 2 +# 1 "./include/uapi/linux/quota.h" 1 +# 90 "./include/uapi/linux/quota.h" +enum { + QIF_BLIMITS_B = 0, + QIF_SPACE_B, + QIF_ILIMITS_B, + QIF_INODES_B, + QIF_BTIME_B, + QIF_ITIME_B, +}; +# 110 "./include/uapi/linux/quota.h" +struct if_dqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; +}; + +struct if_nextdqblk { + __u64 dqb_bhardlimit; + __u64 dqb_bsoftlimit; + __u64 dqb_curspace; + __u64 dqb_ihardlimit; + __u64 dqb_isoftlimit; + __u64 dqb_curinodes; + __u64 dqb_btime; + __u64 dqb_itime; + __u32 dqb_valid; + __u32 dqb_id; +}; +# 144 "./include/uapi/linux/quota.h" +enum { + DQF_ROOT_SQUASH_B = 0, + DQF_SYS_FILE_B = 16, + + DQF_PRIVATE +}; + + + + + + +struct if_dqinfo { + __u64 dqi_bgrace; + __u64 dqi_igrace; + __u32 dqi_flags; + __u32 dqi_valid; +}; +# 178 "./include/uapi/linux/quota.h" +enum { + QUOTA_NL_C_UNSPEC, + QUOTA_NL_C_WARNING, + __QUOTA_NL_C_MAX, +}; + + +enum { + QUOTA_NL_A_UNSPEC, + QUOTA_NL_A_QTYPE, + QUOTA_NL_A_EXCESS_ID, + QUOTA_NL_A_WARNING, + QUOTA_NL_A_DEV_MAJOR, + QUOTA_NL_A_DEV_MINOR, + QUOTA_NL_A_CAUSED_ID, + QUOTA_NL_A_PAD, + __QUOTA_NL_A_MAX, +}; +# 50 "./include/linux/quota.h" 2 + + + + +enum quota_type { + USRQUOTA = 0, + GRPQUOTA = 1, + PRJQUOTA = 2, +}; + + + + + + +typedef __kernel_uid32_t qid_t; +typedef long long qsize_t; + +struct kqid { + union { + kuid_t uid; + kgid_t gid; + kprojid_t projid; + }; + enum quota_type type; +}; + +extern bool qid_eq(struct kqid left, struct kqid right); +extern bool qid_lt(struct kqid left, struct kqid right); +extern qid_t from_kqid(struct user_namespace *to, struct kqid qid); +extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid); +extern bool qid_valid(struct kqid qid); +# 97 "./include/linux/quota.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct kqid make_kqid(struct user_namespace *from, + enum quota_type type, qid_t qid) +{ + struct kqid kqid; + + kqid.type = type; + switch (type) { + case USRQUOTA: + kqid.uid = make_kuid(from, qid); + break; + case GRPQUOTA: + kqid.gid = make_kgid(from, qid); + break; + case PRJQUOTA: + kqid.projid = make_kprojid(from, qid); + break; + default: + do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (891)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/quota.h"), "i" (114), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" 
".popsection\n\t" : : "i" (892)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); + } + return kqid; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct kqid make_kqid_invalid(enum quota_type type) +{ + struct kqid kqid; + + kqid.type = type; + switch (type) { + case USRQUOTA: + kqid.uid = (kuid_t){ -1 }; + break; + case GRPQUOTA: + kqid.gid = (kgid_t){ -1 }; + break; + case PRJQUOTA: + kqid.projid = (kprojid_t){ -1 }; + break; + default: + do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (893)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/quota.h"), "i" (141), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (894)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); + } + return kqid; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct kqid make_kqid_uid(kuid_t uid) +{ + struct kqid kqid; + kqid.type = USRQUOTA; + kqid.uid = uid; + return kqid; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct kqid make_kqid_gid(kgid_t gid) +{ + struct kqid kqid; + kqid.type = GRPQUOTA; + kqid.gid = gid; + return kqid; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct kqid make_kqid_projid(kprojid_t projid) +{ + struct kqid kqid; + kqid.type = PRJQUOTA; + kqid.projid = projid; + return kqid; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qid_has_mapping(struct user_namespace *ns, struct kqid qid) +{ + return from_kqid(ns, qid) != (qid_t) -1; +} + + +extern spinlock_t dq_data_lock; +# 205 "./include/linux/quota.h" +struct mem_dqblk { + qsize_t dqb_bhardlimit; + qsize_t dqb_bsoftlimit; + qsize_t dqb_curspace; + qsize_t dqb_rsvspace; + qsize_t dqb_ihardlimit; + qsize_t dqb_isoftlimit; + qsize_t dqb_curinodes; + time64_t dqb_btime; + time64_t dqb_itime; +}; + + + + +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; + + struct list_head dqi_dirty_list; + unsigned long dqi_flags; + unsigned int dqi_bgrace; + unsigned int dqi_igrace; + qsize_t dqi_max_spc_limit; + qsize_t dqi_max_ino_limit; + void *dqi_priv; +}; + +struct super_block; + + + + + + +enum { + DQF_INFO_DIRTY_B = DQF_PRIVATE, +}; + + +extern void mark_info_dirty(struct super_block *sb, int type); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int info_dirty(struct mem_dqinfo *info) +{ + return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags); +} + +enum { + DQST_LOOKUPS, + DQST_DROPS, + DQST_READS, + DQST_WRITES, + DQST_CACHE_HITS, + DQST_ALLOC_DQUOTS, + DQST_FREE_DQUOTS, + DQST_SYNCS, + _DQST_DQSTAT_LAST +}; + +struct dqstats { + unsigned long stat[_DQST_DQSTAT_LAST]; + struct percpu_counter 
counter[_DQST_DQSTAT_LAST]; +}; + +extern struct dqstats dqstats; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dqstats_inc(unsigned int type) +{ + percpu_counter_inc(&dqstats.counter[type]); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dqstats_dec(unsigned int type) +{ + percpu_counter_dec(&dqstats.counter[type]); +} +# 294 "./include/linux/quota.h" +struct dquot { + struct hlist_node dq_hash; + struct list_head dq_inuse; + struct list_head dq_free; + struct list_head dq_dirty; + struct mutex dq_lock; + spinlock_t dq_dqb_lock; + atomic_t dq_count; + struct super_block *dq_sb; + struct kqid dq_id; + loff_t dq_off; + unsigned long dq_flags; + struct mem_dqblk dq_dqb; +}; + + +struct quota_format_ops { + int (*check_quota_file)(struct super_block *sb, int type); + int (*read_file_info)(struct super_block *sb, int type); + int (*write_file_info)(struct super_block *sb, int type); + int (*free_file_info)(struct super_block *sb, int type); + int (*read_dqblk)(struct dquot *dquot); + int (*commit_dqblk)(struct dquot *dquot); + int (*release_dqblk)(struct dquot *dquot); + int (*get_next_id)(struct super_block *sb, struct kqid *qid); +}; + + +struct dquot_operations { + int (*write_dquot) (struct dquot *); + struct dquot *(*alloc_dquot)(struct super_block *, int); + void (*destroy_dquot)(struct dquot *); + int (*acquire_dquot) (struct dquot *); + int (*release_dquot) (struct dquot *); + int (*mark_dirty) (struct dquot *); + int (*write_info) (struct super_block *, int); + + + qsize_t *(*get_reserved_space) (struct inode *); + int (*get_projid) (struct inode *, kprojid_t *); + + int (*get_inode_usage) (struct inode *, qsize_t *); + + int (*get_next_id) (struct super_block *sb, struct kqid *qid); +}; + +struct path; + + +struct qc_dqblk { + int d_fieldmask; + u64 d_spc_hardlimit; + u64 d_spc_softlimit; + u64 d_ino_hardlimit; + u64 d_ino_softlimit; + u64 d_space; + u64 d_ino_count; + s64 d_ino_timer; + + s64 d_spc_timer; + int d_ino_warns; + int d_spc_warns; + u64 d_rt_spc_hardlimit; + u64 d_rt_spc_softlimit; + u64 d_rt_space; + s64 d_rt_spc_timer; + int d_rt_spc_warns; +}; +# 395 "./include/linux/quota.h" +struct qc_type_state { + unsigned int flags; + unsigned int spc_timelimit; + + unsigned int ino_timelimit; + unsigned int rt_spc_timelimit; + unsigned int spc_warnlimit; + unsigned int ino_warnlimit; + unsigned int rt_spc_warnlimit; + unsigned long long ino; + blkcnt_t blocks; + blkcnt_t nextents; +}; + +struct qc_state { + unsigned int s_incoredqs; + struct qc_type_state s_state[3]; +}; + + +struct qc_info { + int i_fieldmask; + unsigned int i_flags; + unsigned int i_spc_timelimit; + + unsigned int i_ino_timelimit; + unsigned int i_rt_spc_timelimit; + unsigned int i_spc_warnlimit; + unsigned int i_ino_warnlimit; + unsigned int i_rt_spc_warnlimit; +}; + + +struct quotactl_ops { + int (*quota_on)(struct super_block *, int, int, const struct path *); + int (*quota_off)(struct super_block *, int); + int (*quota_enable)(struct super_block *, unsigned int); + int (*quota_disable)(struct super_block *, unsigned int); + int (*quota_sync)(struct super_block *, int); + int (*set_info)(struct super_block *, int, struct qc_info *); + int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); + int (*get_nextdqblk)(struct super_block *, struct kqid *, + struct qc_dqblk *); + int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk 
*); + int (*get_state)(struct super_block *, struct qc_state *); + int (*rm_xquota)(struct super_block *, unsigned int); +}; + +struct quota_format_type { + int qf_fmt_id; + const struct quota_format_ops *qf_ops; + struct module *qf_owner; + struct quota_format_type *qf_next; +}; +# 463 "./include/linux/quota.h" +enum { + _DQUOT_USAGE_ENABLED = 0, + _DQUOT_LIMITS_ENABLED, + _DQUOT_SUSPENDED, + + + _DQUOT_STATE_FLAGS +}; +# 490 "./include/linux/quota.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int dquot_state_flag(unsigned int flags, int type) +{ + return flags << type; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int dquot_generic_flag(unsigned int flags, int type) +{ + return (flags >> type) & ((1 << _DQUOT_USAGE_ENABLED * 3) | (1 << _DQUOT_LIMITS_ENABLED * 3) | (1 << _DQUOT_SUSPENDED * 3)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned dquot_state_types(unsigned flags, unsigned flag) +{ + do { extern void __compiletime_assert_895(void) __attribute__((__error__("BUILD_BUG_ON failed: " "(flag) == 0 || (((flag) & ((flag) - 1)) != 0)"))); if (!(!((flag) == 0 || (((flag) & ((flag) - 1)) != 0)))) __compiletime_assert_895(); } while (0); + return (flags / flag) & ((1 << 3) - 1); +} + + +extern void quota_send_warning(struct kqid qid, dev_t dev, + const char warntype); +# 518 "./include/linux/quota.h" +struct quota_info { + unsigned int flags; + struct rw_semaphore dqio_sem; + struct inode *files[3]; + struct mem_dqinfo info[3]; + const struct quota_format_ops *ops[3]; +}; + +int register_quota_format(struct quota_format_type *fmt); +void unregister_quota_format(struct quota_format_type *fmt); + +struct quota_module_name { + int qm_fmt_id; + char *qm_mod_name; +}; +# 249 "./include/linux/fs.h" 2 +# 282 "./include/linux/fs.h" +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 0x80000, + AOP_TRUNCATED_PAGE = 0x80001, +}; +# 295 "./include/linux/fs.h" +struct page; +struct address_space; +struct writeback_control; +struct readahead_control; + + + + + +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = 1, + WRITE_LIFE_SHORT = 2, + WRITE_LIFE_MEDIUM = 3, + WRITE_LIFE_LONG = 4, + WRITE_LIFE_EXTREME = 5, +}; +# 324 "./include/linux/fs.h" +struct kiocb { + struct file *ki_filp; + + + + + loff_t ki_pos; + void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); + void *private; + int ki_flags; + u16 ki_hint; + u16 ki_ioprio; + union { + unsigned int ki_cookie; + struct wait_page_queue *ki_waitq; + }; + + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_sync_kiocb(struct kiocb *kiocb) +{ + return kiocb->ki_complete == ((void *)0); +} +# 358 "./include/linux/fs.h" +typedef struct { + size_t written; + size_t count; + union { + char *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); + +struct address_space_operations { + int (*writepage)(struct page *page, struct writeback_control *wbc); + int (*readpage)(struct file *, struct page *); + + + int (*writepages)(struct address_space *, struct writeback_control *); + + + int (*set_page_dirty)(struct page *page); + + + + + + int (*readpages)(struct file *filp, struct 
address_space *mapping, + struct list_head *pages, unsigned nr_pages); + void (*readahead)(struct readahead_control *); + + int (*write_begin)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); + int (*write_end)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + + + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidatepage) (struct page *, unsigned int, unsigned int); + int (*releasepage) (struct page *, gfp_t); + void (*freepage)(struct page *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); + + + + + int (*migratepage) (struct address_space *, + struct page *, struct page *, enum migrate_mode); + bool (*isolate_page)(struct page *, isolate_mode_t); + void (*putback_page)(struct page *); + int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, unsigned long, + unsigned long); + void (*is_dirty_writeback) (struct page *, bool *, bool *); + int (*error_remove_page)(struct address_space *, struct page *); + + + int (*swap_activate)(struct swap_info_struct *sis, struct file *file, + sector_t *span); + void (*swap_deactivate)(struct file *file); +}; + +extern const struct address_space_operations empty_aops; + + + + + +int pagecache_write_begin(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); + +int pagecache_write_end(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); +# 455 "./include/linux/fs.h" +struct address_space { + struct inode *host; + struct xarray i_pages; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + + + atomic_t nr_thps; + + struct rb_root_cached i_mmap; + struct rw_semaphore i_mmap_rwsem; + unsigned long nrpages; + unsigned long nrexceptional; + unsigned long writeback_index; + const struct address_space_operations *a_ops; + unsigned long flags; + errseq_t wb_err; + spinlock_t private_lock; + struct list_head private_list; + void *private_data; +} __attribute__((aligned(sizeof(long)))) __attribute__((__designated_init__)); + + + + + +struct request_queue; + +struct block_device { + dev_t bd_dev; + int bd_openers; + struct inode * bd_inode; + struct super_block * bd_super; + struct mutex bd_mutex; + void * bd_claiming; + void * bd_holder; + int bd_holders; + bool bd_write_holder; + + struct list_head bd_holder_disks; + + struct block_device * bd_contains; + unsigned bd_block_size; + u8 bd_partno; + struct hd_struct * bd_part; + + unsigned bd_part_count; + int bd_invalidated; + struct gendisk * bd_disk; + struct request_queue * bd_queue; + struct backing_dev_info *bd_bdi; + struct list_head bd_list; + + + + + + + unsigned long bd_private; + + + int bd_fsfreeze_count; + + struct mutex bd_fsfreeze_mutex; +} __attribute__((__designated_init__)); +# 529 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mapping_tagged(struct address_space *mapping, xa_mark_t tag) +{ + return xa_marked(&mapping->i_pages, tag); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_mmap_lock_write(struct address_space *mapping) +{ + down_write(&mapping->i_mmap_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) int i_mmap_trylock_write(struct address_space *mapping) +{ + return down_write_trylock(&mapping->i_mmap_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_mmap_unlock_write(struct address_space *mapping) +{ + up_write(&mapping->i_mmap_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_mmap_lock_read(struct address_space *mapping) +{ + down_read(&mapping->i_mmap_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_mmap_unlock_read(struct address_space *mapping) +{ + up_read(&mapping->i_mmap_rwsem); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mapping_mapped(struct address_space *mapping) +{ + return !(({ do { extern void __compiletime_assert_896(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(char) || sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(short) || sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(int) || sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(long)) || sizeof((&mapping->i_mmap.rb_root)->rb_node) == sizeof(long long))) __compiletime_assert_896(); } while (0); ({ typeof( _Generic(((&mapping->i_mmap.rb_root)->rb_node), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&mapping->i_mmap.rb_root)->rb_node))) __x = (*(const volatile typeof( _Generic(((&mapping->i_mmap.rb_root)->rb_node), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&mapping->i_mmap.rb_root)->rb_node))) *)&((&mapping->i_mmap.rb_root)->rb_node)); do { } while (0); (typeof((&mapping->i_mmap.rb_root)->rb_node))__x; }); }) == ((void *)0)); +} +# 576 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mapping_writably_mapped(struct address_space *mapping) +{ + return atomic_read(&mapping->i_mmap_writable) > 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mapping_map_writable(struct address_space *mapping) +{ + return atomic_inc_unless_negative(&mapping->i_mmap_writable) ? + 0 : -1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mapping_unmap_writable(struct address_space *mapping) +{ + atomic_dec(&mapping->i_mmap_writable); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mapping_deny_writable(struct address_space *mapping) +{ + return atomic_dec_unless_positive(&mapping->i_mmap_writable) ? 
+ 0 : -16; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mapping_allow_writable(struct address_space *mapping) +{ + atomic_inc(&mapping->i_mmap_writable); +} +# 614 "./include/linux/fs.h" +struct posix_acl; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct posix_acl * +uncached_acl_sentinel(struct task_struct *task) +{ + return (void *)task + 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +is_uncached_acl(struct posix_acl *acl) +{ + return (long)acl & 1; +} + + + + + + + +struct fsnotify_mark_connector; + + + + + + +struct inode { + umode_t i_mode; + unsigned short i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + + + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; + + + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + + + void *i_security; + + + + unsigned long i_ino; + + + + + + + + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + struct timespec64 i_atime; + struct timespec64 i_mtime; + struct timespec64 i_ctime; + spinlock_t i_lock; + unsigned short i_bytes; + u8 i_blkbits; + u8 i_write_hint; + blkcnt_t i_blocks; + + + + + + + unsigned long i_state; + struct rw_semaphore i_rwsem; + + unsigned long dirtied_when; + unsigned long dirtied_time_when; + + struct hlist_node i_hash; + struct list_head i_io_list; + + struct bdi_writeback *i_wb; + + + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; + + struct list_head i_lru; + struct list_head i_sb_list; + struct list_head i_wb_list; + union { + struct hlist_head i_dentry; + struct callback_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; + + atomic_t i_readcount; + + union { + const struct file_operations *i_fop; + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct block_device *i_bdev; + struct cdev *i_cdev; + char *i_link; + unsigned i_dir_seq; + }; + + __u32 i_generation; + + + __u32 i_fsnotify_mask; + struct fsnotify_mark_connector *i_fsnotify_marks; + + + + struct fscrypt_info *i_crypt_info; + + + + struct fsverity_info *i_verity_info; + + + void *i_private; +} __attribute__((__designated_init__)); + +struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int i_blocksize(const struct inode *node) +{ + return (1 << node->i_blkbits); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int inode_unhashed(struct inode *inode) +{ + return hlist_unhashed(&inode->i_hash); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_fake_hash(struct inode *inode) +{ + hlist_add_fake(&inode->i_hash); +} +# 795 "./include/linux/fs.h" +enum inode_i_mutex_lock_class +{ + I_MUTEX_NORMAL, + I_MUTEX_PARENT, + I_MUTEX_CHILD, + I_MUTEX_XATTR, + I_MUTEX_NONDIR2, + I_MUTEX_PARENT2, +}; + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_lock(struct inode *inode) +{ + down_write(&inode->i_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_unlock(struct inode *inode) +{ + up_write(&inode->i_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_lock_shared(struct inode *inode) +{ + down_read(&inode->i_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_unlock_shared(struct inode *inode) +{ + up_read(&inode->i_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int inode_trylock(struct inode *inode) +{ + return down_write_trylock(&inode->i_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int inode_trylock_shared(struct inode *inode) +{ + return down_read_trylock(&inode->i_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int inode_is_locked(struct inode *inode) +{ + return rwsem_is_locked(&inode->i_rwsem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_lock_nested(struct inode *inode, unsigned subclass) +{ + down_write_nested(&inode->i_rwsem, subclass); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_lock_shared_nested(struct inode *inode, unsigned subclass) +{ + down_read_nested(&inode->i_rwsem, subclass); +} + +void lock_two_nondirectories(struct inode *, struct inode*); +void unlock_two_nondirectories(struct inode *, struct inode*); +# 863 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) loff_t i_size_read(const struct inode *inode) +{ +# 882 "./include/linux/fs.h" + return inode->i_size; + +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_size_write(struct inode *inode, loff_t i_size) +{ +# 904 "./include/linux/fs.h" + inode->i_size = i_size; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned iminor(const struct inode *inode) +{ + return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned imajor(const struct inode *inode) +{ + return ((unsigned int) ((inode->i_rdev) >> 20)); +} + +extern struct block_device *I_BDEV(struct inode *inode); + +struct fown_struct { + rwlock_t lock; + struct pid *pid; + enum pid_type pid_type; + kuid_t uid, euid; + int signum; +}; + + + + +struct file_ra_state { + unsigned long start; + unsigned int size; + unsigned int async_size; + + + unsigned int ra_pages; + unsigned int mmap_miss; + loff_t prev_pos; +}; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ra_has_index(struct file_ra_state *ra, unsigned long index) +{ + return (index >= ra->start && + index < ra->start + ra->size); +} + +struct file { + union { + 
struct llist_node fu_llist; + struct callback_head fu_rcuhead; + } f_u; + struct path f_path; + struct inode *f_inode; + const struct file_operations *f_op; + + + + + + spinlock_t f_lock; + enum rw_hint f_write_hint; + atomic_long_t f_count; + unsigned int f_flags; + fmode_t f_mode; + struct mutex f_pos_lock; + loff_t f_pos; + struct fown_struct f_owner; + const struct cred *f_cred; + struct file_ra_state f_ra; + + u64 f_version; + + void *f_security; + + + void *private_data; + + + + struct list_head f_ep_links; + struct list_head f_tfile_llink; + + struct address_space *f_mapping; + errseq_t f_wb_err; + errseq_t f_sb_err; +} __attribute__((__designated_init__)) + __attribute__((aligned(4))); + +struct file_handle { + __u32 handle_bytes; + int handle_type; + + unsigned char f_handle[]; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct file *get_file(struct file *f) +{ + atomic_long_inc(&f->f_count); + return f; +} +# 1042 "./include/linux/fs.h" +typedef void *fl_owner_t; + +struct file_lock; + +struct file_lock_operations { + void (*fl_copy_lock)(struct file_lock *, struct file_lock *); + void (*fl_release_private)(struct file_lock *); +}; + +struct lock_manager_operations { + fl_owner_t (*lm_get_owner)(fl_owner_t); + void (*lm_put_owner)(fl_owner_t); + void (*lm_notify)(struct file_lock *); + int (*lm_grant)(struct file_lock *, int); + bool (*lm_break)(struct file_lock *); + int (*lm_change)(struct file_lock *, int, struct list_head *); + void (*lm_setup)(struct file_lock *, void **); + bool (*lm_breaker_owns_lease)(struct file_lock *); +}; + +struct lock_manager { + struct list_head list; + + + + + bool block_opens; +}; + +struct net; +void locks_start_grace(struct net *, struct lock_manager *); +void locks_end_grace(struct lock_manager *); +bool locks_in_grace(struct net *); +bool opens_in_grace(struct net *); + + +# 1 "./include/linux/nfs_fs_i.h" 1 + + + + +struct nlm_lockowner; + + + + +struct nfs_lock_info { + u32 state; + struct nlm_lockowner *owner; + struct list_head list; +}; + +struct nfs4_lock_state; +struct nfs4_lock_info { + struct nfs4_lock_state *owner; +}; +# 1079 "./include/linux/fs.h" 2 +# 1097 "./include/linux/fs.h" +struct file_lock { + struct file_lock *fl_blocker; + struct list_head fl_list; + struct hlist_node fl_link; + struct list_head fl_blocked_requests; + + + struct list_head fl_blocked_member; + + + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + unsigned int fl_pid; + int fl_link_cpu; + wait_queue_head_t fl_wait; + struct file *fl_file; + loff_t fl_start; + loff_t fl_end; + + struct fasync_struct * fl_fasync; + + unsigned long fl_break_time; + unsigned long fl_downgrade_time; + + const struct file_lock_operations *fl_ops; + const struct lock_manager_operations *fl_lmops; + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; + int state; + unsigned int debug_id; + } afs; + } fl_u; +} __attribute__((__designated_init__)); + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; +# 1149 "./include/linux/fs.h" +extern void send_sigio(struct fown_struct *fown, int fd, int band); + + + + +extern int fcntl_getlk(struct file *, unsigned int, struct flock *); +extern int fcntl_setlk(unsigned int, struct file *, unsigned int, + struct flock *); + + + + + + + +extern int fcntl_setlease(unsigned int fd, struct file *filp, 
long arg); +extern int fcntl_getlease(struct file *filp); + + +void locks_free_lock_context(struct inode *inode); +void locks_free_lock(struct file_lock *fl); +extern void locks_init_lock(struct file_lock *); +extern struct file_lock * locks_alloc_lock(void); +extern void locks_copy_lock(struct file_lock *, struct file_lock *); +extern void locks_copy_conflock(struct file_lock *, struct file_lock *); +extern void locks_remove_posix(struct file *, fl_owner_t); +extern void locks_remove_file(struct file *); +extern void locks_release_private(struct file_lock *); +extern void posix_test_lock(struct file *, struct file_lock *); +extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); +extern int locks_delete_block(struct file_lock *); +extern int vfs_test_lock(struct file *, struct file_lock *); +extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); +extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); +extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); +extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); +extern void lease_get_mtime(struct inode *, struct timespec64 *time); +extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); +extern int vfs_setlease(struct file *, long, struct file_lock **, void **); +extern int lease_modify(struct file_lock *, int, struct list_head *); + +struct notifier_block; +extern int lease_register_notifier(struct notifier_block *); +extern void lease_unregister_notifier(struct notifier_block *); + +struct files_struct; +extern void show_fd_locks(struct seq_file *f, + struct file *filp, struct files_struct *files); +# 1334 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct inode *file_inode(const struct file *f) +{ + return f->f_inode; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dentry *file_dentry(const struct file *file) +{ + return d_real(file->f_path.dentry, file_inode(file)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int locks_lock_file_wait(struct file *filp, struct file_lock *fl) +{ + return locks_lock_inode_wait(file_inode(filp), fl); +} + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; + struct file *fa_file; + struct callback_head fa_rcu; +}; + + + + +extern int fasync_helper(int, struct file *, int, struct fasync_struct **); +extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *); +extern int fasync_remove_entry(struct file *, struct fasync_struct **); +extern struct fasync_struct *fasync_alloc(void); +extern void fasync_free(struct fasync_struct *); + + +extern void kill_fasync(struct fasync_struct **, int, int); + +extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); +extern int f_setown(struct file *filp, unsigned long arg, int force); +extern void f_delown(struct file *filp); +extern pid_t f_getown(struct file *filp); +extern int send_sigurg(struct fown_struct *fown); +# 1427 "./include/linux/fs.h" +enum { + SB_UNFROZEN = 0, + SB_FREEZE_WRITE = 1, + SB_FREEZE_PAGEFAULT = 2, + SB_FREEZE_FS = 3, + + SB_FREEZE_COMPLETE = 4, +}; + + + +struct sb_writers { + int frozen; + wait_queue_head_t 
wait_unfrozen; + struct percpu_rw_semaphore rw_sem[(SB_FREEZE_COMPLETE - 1)]; +}; + +struct super_block { + struct list_head s_list; + dev_t s_dev; + unsigned char s_blocksize_bits; + unsigned long s_blocksize; + loff_t s_maxbytes; + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + unsigned long s_flags; + unsigned long s_iflags; + unsigned long s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; + + void *s_security; + + const struct xattr_handler **s_xattr; + + const struct fscrypt_operations *s_cop; + struct key *s_master_keys; + + + const struct fsverity_operations *s_vop; + + struct hlist_bl_head s_roots; + struct list_head s_mounts; + struct block_device *s_bdev; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; + struct quota_info s_dquot; + + struct sb_writers s_writers; + + + + + + + void *s_fs_info; + + + u32 s_time_gran; + + time64_t s_time_min; + time64_t s_time_max; + + __u32 s_fsnotify_mask; + struct fsnotify_mark_connector *s_fsnotify_marks; + + + char s_id[32]; + uuid_t s_uuid; + + unsigned int s_max_links; + fmode_t s_mode; + + + + + + struct mutex s_vfs_rename_mutex; + + + + + + const char *s_subtype; + + const struct dentry_operations *s_d_op; + + + + + int cleancache_poolid; + + struct shrinker s_shrink; + + + atomic_long_t s_remove_count; + + + atomic_long_t s_fsnotify_inode_refs; + + + int s_readonly_remount; + + + errseq_t s_wb_err; + + + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + + + + + + + struct user_namespace *s_user_ns; + + + + + + + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct callback_head rcu; + struct work_struct destroy_work; + + struct mutex s_sync_lock; + + + + + int s_stack_depth; + + + spinlock_t s_inode_list_lock __attribute__((__aligned__((1 << (6))))); + struct list_head s_inodes; + + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; +} __attribute__((__designated_init__)); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) uid_t i_uid_read(const struct inode *inode) +{ + return from_kuid(inode->i_sb->s_user_ns, inode->i_uid); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gid_t i_gid_read(const struct inode *inode) +{ + return from_kgid(inode->i_sb->s_user_ns, inode->i_gid); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_uid_write(struct inode *inode, uid_t uid) +{ + inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_gid_write(struct inode *inode, gid_t gid) +{ + inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); +} + +extern struct timespec64 current_time(struct inode *inode); + + + + + +void __sb_end_write(struct super_block *sb, int level); +int __sb_start_write(struct super_block *sb, int level, bool wait); +# 1622 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sb_end_write(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_WRITE); +} +# 1634 
"./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sb_end_pagefault(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_PAGEFAULT); +} +# 1646 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sb_end_intwrite(struct super_block *sb) +{ + __sb_end_write(sb, SB_FREEZE_FS); +} +# 1670 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sb_start_write(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_WRITE, true); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sb_start_write_trylock(struct super_block *sb) +{ + return __sb_start_write(sb, SB_FREEZE_WRITE, false); +} +# 1699 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sb_start_pagefault(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true); +} +# 1717 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sb_start_intwrite(struct super_block *sb) +{ + __sb_start_write(sb, SB_FREEZE_FS, true); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sb_start_intwrite_trylock(struct super_block *sb) +{ + return __sb_start_write(sb, SB_FREEZE_FS, false); +} + + +extern bool inode_owner_or_capable(const struct inode *inode); + + + + +extern int vfs_create(struct inode *, struct dentry *, umode_t, bool); +extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); +extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); +extern int vfs_symlink(struct inode *, struct dentry *, const char *); +extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **); +extern int vfs_rmdir(struct inode *, struct dentry *); +extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); +extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vfs_whiteout(struct inode *dir, struct dentry *dentry) +{ + return vfs_mknod(dir, dentry, 0020000 | 0, 0); +} + +extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, + int open_flag); + +int vfs_mkobj(struct dentry *, umode_t, + int (*f)(struct dentry *, umode_t, void *), + void *); + +extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); + + +extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + + + + + + + +extern void inode_init_owner(struct inode *inode, const struct inode *dir, + umode_t mode); +extern bool may_open_dev(const struct path *path); + + + + + + + +struct dir_context; +typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, + unsigned); + +struct dir_context { + filldir_t actor; + loff_t pos; +}; + +struct block_device_operations; +# 1832 "./include/linux/fs.h" +struct iov_iter; + +struct file_operations { + struct module *owner; + loff_t (*llseek) (struct file *, loff_t, int); + ssize_t (*read) (struct file *, char *, size_t, loff_t *); + 
ssize_t (*write) (struct file *, const char *, size_t, loff_t *); + ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); + ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *kiocb, bool spin); + int (*iterate) (struct file *, struct dir_context *); + int (*iterate_shared) (struct file *, struct dir_context *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); + long (*compat_ioctl) (struct file *, unsigned int, unsigned long); + int (*mmap) (struct file *, struct vm_area_struct *); + unsigned long mmap_supported_flags; + int (*open) (struct inode *, struct file *); + int (*flush) (struct file *, fl_owner_t id); + int (*release) (struct inode *, struct file *); + int (*fsync) (struct file *, loff_t, loff_t, int datasync); + int (*fasync) (int, struct file *, int); + int (*lock) (struct file *, int, struct file_lock *); + ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); + unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); + int (*check_flags)(int); + int (*flock) (struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*setlease)(struct file *, long, struct file_lock **, void **); + long (*fallocate)(struct file *file, int mode, loff_t offset, + loff_t len); + void (*show_fdinfo)(struct seq_file *m, struct file *f); + + + + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, + loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); + int (*fadvise)(struct file *, loff_t, loff_t, int); +} __attribute__((__designated_init__)); + +struct inode_operations { + struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); + const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); + int (*permission) (struct inode *, int); + struct posix_acl * (*get_acl)(struct inode *, int); + + int (*readlink) (struct dentry *, char *,int); + + int (*create) (struct inode *,struct dentry *, umode_t, bool); + int (*link) (struct dentry *,struct inode *,struct dentry *); + int (*unlink) (struct inode *,struct dentry *); + int (*symlink) (struct inode *,struct dentry *,const char *); + int (*mkdir) (struct inode *,struct dentry *,umode_t); + int (*rmdir) (struct inode *,struct dentry *); + int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); + int (*rename) (struct inode *, struct dentry *, + struct inode *, struct dentry *, unsigned int); + int (*setattr) (struct dentry *, struct iattr *); + int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); + ssize_t (*listxattr) (struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, + u64 len); + int (*update_time)(struct inode *, struct timespec64 *, int); + int (*atomic_open)(struct inode *, struct dentry *, + struct file *, unsigned open_flag, + umode_t create_mode); + int (*tmpfile) (struct inode *, struct dentry *, umode_t); + int (*set_acl)(struct inode *, struct posix_acl *, int); +} __attribute__((__aligned__((1 << (6))))); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) ssize_t call_read_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->read_iter(kio, iter); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ssize_t call_write_iter(struct file *file, struct kiocb *kio, + struct iov_iter *iter) +{ + return file->f_op->write_iter(kio, iter); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int call_mmap(struct file *file, struct vm_area_struct *vma) +{ + return file->f_op->mmap(file, vma); +} + +ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector, + unsigned long nr_segs, unsigned long fast_segs, + struct iovec *fast_pointer, + struct iovec **ret_pointer); + +extern ssize_t __vfs_read(struct file *, char *, size_t, loff_t *); +extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *); +extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *); +extern ssize_t vfs_readv(struct file *, const struct iovec *, + unsigned long, loff_t *, rwf_t); +extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, + loff_t, size_t, unsigned int); +extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + size_t len, unsigned int flags); +extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *count, + unsigned int remap_flags); +extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); +extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); +extern int vfs_dedupe_file_range(struct file *file, + struct file_dedupe_range *same); +extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, + struct file *dst_file, loff_t dst_pos, + loff_t len, unsigned int remap_flags); + + +struct super_operations { + struct inode *(*alloc_inode)(struct super_block *sb); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + + void (*dirty_inode) (struct inode *, int flags); + int (*write_inode) (struct inode *, struct writeback_control *wbc); + int (*drop_inode) (struct inode *); + void (*evict_inode) (struct inode *); + void (*put_super) (struct super_block *); + int (*sync_fs)(struct super_block *sb, int wait); + int (*freeze_super) (struct super_block *); + int (*freeze_fs) (struct super_block *); + int (*thaw_super) (struct super_block *); + int (*unfreeze_fs) (struct super_block *); + int (*statfs) (struct dentry *, struct kstatfs *); + int (*remount_fs) (struct super_block *, int *, char *); + void (*umount_begin) (struct super_block *); + + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); + + ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); + ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + struct dquot **(*get_dquots)(struct inode *); + + int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); + long (*nr_cached_objects)(struct super_block *, + struct shrink_control *); + long 
(*free_cached_objects)(struct super_block *, + struct shrink_control *); +}; +# 2030 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & 1; } +# 2060 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool HAS_UNMAPPED_ID(struct inode *inode) +{ + return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum rw_hint file_write_hint(struct file *file) +{ + if (file->f_write_hint != WRITE_LIFE_NOT_SET) + return file->f_write_hint; + + return file_inode(file)->i_write_hint; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int iocb_flags(struct file *file); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 ki_hint_validate(enum rw_hint hint) +{ + typeof(((struct kiocb *)0)->ki_hint) max_hint = -1; + + if (hint <= max_hint) + return hint; + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) +{ + *kiocb = (struct kiocb) { + .ki_filp = filp, + .ki_flags = iocb_flags(filp), + .ki_hint = ki_hint_validate(file_write_hint(filp)), + .ki_ioprio = get_current_ioprio(), + }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, + struct file *filp) +{ + *kiocb = (struct kiocb) { + .ki_filp = filp, + .ki_flags = kiocb_src->ki_flags, + .ki_hint = kiocb_src->ki_hint, + .ki_ioprio = kiocb_src->ki_ioprio, + .ki_pos = kiocb_src->ki_pos, + }; +} +# 2197 "./include/linux/fs.h" +extern void __mark_inode_dirty(struct inode *, int); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mark_inode_dirty(struct inode *inode) +{ + __mark_inode_dirty(inode, (((1 << 0) | (1 << 1)) | (1 << 2))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mark_inode_dirty_sync(struct inode *inode) +{ + __mark_inode_dirty(inode, (1 << 0)); +} + +extern void inc_nlink(struct inode *inode); +extern void drop_nlink(struct inode *inode); +extern void clear_nlink(struct inode *inode); +extern void set_nlink(struct inode *inode, unsigned int nlink); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_inc_link_count(struct inode *inode) +{ + inc_nlink(inode); + mark_inode_dirty(inode); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_dec_link_count(struct inode *inode) +{ + drop_nlink(inode); + mark_inode_dirty(inode); +} + +enum file_time_flags { + S_ATIME = 1, + S_MTIME = 2, + S_CTIME = 4, + S_VERSION = 8, +}; + +extern bool atime_needs_update(const struct path *, struct inode *); +extern void touch_atime(const struct path *); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void file_accessed(struct file *file) +{ + 
if (!(file->f_flags & 01000000)) + touch_atime(&file->f_path); +} + +extern int file_modified(struct file *file); + +int sync_inode(struct inode *inode, struct writeback_control *wbc); +int sync_inode_metadata(struct inode *inode, int wait); + +struct file_system_type { + const char *name; + int fs_flags; + + + + + + + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry *(*mount) (struct file_system_type *, int, + const char *, void *); + void (*kill_sb) (struct super_block *); + struct module *owner; + struct file_system_type * next; + struct hlist_head fs_supers; + + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[(SB_FREEZE_COMPLETE - 1)]; + + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key i_mutex_dir_key; +}; + + + + +extern struct dentry *mount_bdev(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int)); +# 2287 "./include/linux/fs.h" +extern struct dentry *mount_single(struct file_system_type *fs_type, + int flags, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_nodev(struct file_system_type *fs_type, + int flags, void *data, + int (*fill_super)(struct super_block *, void *, int)); +extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); +void generic_shutdown_super(struct super_block *sb); + +void kill_block_super(struct super_block *sb); + + + + + + +void kill_anon_super(struct super_block *sb); +void kill_litter_super(struct super_block *sb); +void deactivate_super(struct super_block *sb); +void deactivate_locked_super(struct super_block *sb); +int set_anon_super(struct super_block *s, void *data); +int set_anon_super_fc(struct super_block *s, struct fs_context *fc); +int get_anon_bdev(dev_t *); +void free_anon_bdev(dev_t); +struct super_block *sget_fc(struct fs_context *fc, + int (*test)(struct super_block *, struct fs_context *), + int (*set)(struct super_block *, struct fs_context *)); +struct super_block *sget(struct file_system_type *type, + int (*test)(struct super_block *,void *), + int (*set)(struct super_block *,void *), + int flags, void *data); +# 2336 "./include/linux/fs.h" +extern int register_filesystem(struct file_system_type *); +extern int unregister_filesystem(struct file_system_type *); +extern struct vfsmount *kern_mount(struct file_system_type *); +extern void kern_unmount(struct vfsmount *mnt); +extern int may_umount_tree(struct vfsmount *); +extern int may_umount(struct vfsmount *); +extern long do_mount(const char *, const char *, + const char *, unsigned long, void *); +extern struct vfsmount *collect_mounts(const struct path *); +extern void drop_collected_mounts(struct vfsmount *); +extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, + struct vfsmount *); +extern int vfs_statfs(const struct path *, struct kstatfs *); +extern int user_statfs(const char *, struct kstatfs *); +extern int fd_statfs(int, struct kstatfs *); +extern int freeze_super(struct super_block *super); +extern int thaw_super(struct super_block *super); +extern bool our_mnt(struct vfsmount *mnt); +extern __attribute__((__format__(printf, 2, 3))) +int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); +extern int super_setup_bdi(struct super_block *sb); + +extern int current_umask(void); + +extern void 
ihold(struct inode * inode); +extern void iput(struct inode *); +extern int generic_update_time(struct inode *, struct timespec64 *, int); + + +extern struct kobject *fs_kobj; + + + + +extern int locks_mandatory_locked(struct file *); +extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __mandatory_lock(struct inode *ino) +{ + return (ino->i_mode & (0002000 | 00010)) == 0002000; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mandatory_lock(struct inode *ino) +{ + return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int locks_verify_locked(struct file *file) +{ + if (mandatory_lock(file_inode(file))) + return locks_mandatory_locked(file); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int locks_verify_truncate(struct inode *inode, + struct file *f, + loff_t size) +{ + if (!inode->i_flctx || !mandatory_lock(inode)) + return 0; + + if (size < inode->i_size) { + return locks_mandatory_area(inode, f, size, inode->i_size - 1, + 1); + } else { + return locks_mandatory_area(inode, f, inode->i_size, size - 1, + 1); + } +} +# 2454 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int break_lease(struct inode *inode, unsigned int mode) +{ + + + + + + + asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc"); + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + return __break_lease(inode, mode, 32); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int break_deleg(struct inode *inode, unsigned int mode) +{ + + + + + + + asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc"); + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + return __break_lease(inode, mode, 4); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int try_break_deleg(struct inode *inode, struct inode **delegated_inode) +{ + int ret; + + ret = break_deleg(inode, 00000001|00004000); + if (ret == -11 && delegated_inode) { + *delegated_inode = inode; + ihold(inode); + } + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int break_deleg_wait(struct inode **delegated_inode) +{ + int ret; + + ret = break_deleg(*delegated_inode, 00000001); + iput(*delegated_inode); + *delegated_inode = ((void *)0); + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int break_layout(struct inode *inode, bool wait) +{ + asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc"); + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + return __break_lease(inode, + wait ? 
00000001 : 00000001 | 00004000, + 2048); + return 0; +} +# 2544 "./include/linux/fs.h" +struct audit_names; +struct filename { + const char *name; + const char *uptr; + int refcnt; + struct audit_names *aname; + const char iname[]; +}; + +extern long vfs_truncate(const struct path *, loff_t); +extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, + struct file *filp); +extern int vfs_fallocate(struct file *file, int mode, loff_t offset, + loff_t len); +extern long do_sys_open(int dfd, const char *filename, int flags, + umode_t mode); +extern struct file *file_open_name(struct filename *, int, umode_t); +extern struct file *filp_open(const char *, int, umode_t); +extern struct file *file_open_root(struct dentry *, struct vfsmount *, + const char *, int, umode_t); +extern struct file * dentry_open(const struct path *, int, const struct cred *); +extern struct file * open_with_fake_path(const struct path *, int, + struct inode*, const struct cred *); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct file *file_clone_open(struct file *file) +{ + return dentry_open(&file->f_path, file->f_flags, file->f_cred); +} +extern int filp_close(struct file *, fl_owner_t id); + +extern struct filename *getname_flags(const char *, int, int *); +extern struct filename *getname(const char *); +extern struct filename *getname_kernel(const char *); +extern void putname(struct filename *name); + +extern int finish_open(struct file *file, struct dentry *dentry, + int (*open)(struct inode *, struct file *)); +extern int finish_no_open(struct file *file, struct dentry *dentry); + + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) vfs_caches_init_early(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) vfs_caches_init(void); + +extern struct kmem_cache *names_cachep; + + + + + +extern int register_blkdev(unsigned int, const char *); +extern void unregister_blkdev(unsigned int, const char *); +extern struct block_device *bdget(dev_t); +extern struct block_device *bdgrab(struct block_device *bdev); +extern void bd_set_size(struct block_device *, loff_t size); +extern void bd_forget(struct inode *inode); +extern void bdput(struct block_device *); +extern void invalidate_bdev(struct block_device *); +extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); +extern int sync_blockdev(struct block_device *bdev); +extern struct super_block *freeze_bdev(struct block_device *); +extern void emergency_thaw_all(void); +extern void emergency_thaw_bdev(struct super_block *sb); +extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); +extern int fsync_bdev(struct block_device *); + +extern struct super_block *blockdev_superblock; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sb_is_blkdev_sb(struct super_block *sb) +{ + return sb == blockdev_superblock; +} +# 2644 "./include/linux/fs.h" +extern int sync_filesystem(struct super_block *); +extern const struct file_operations def_blk_fops; +extern const struct file_operations def_chr_fops; + +extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); +extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); +extern int blkdev_get(struct block_device *bdev, fmode_t mode, void 
*holder); +extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder); +extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, + void *holder); +extern struct block_device *bd_start_claiming(struct block_device *bdev, + void *holder); +extern void bd_finish_claiming(struct block_device *bdev, + struct block_device *whole, void *holder); +extern void bd_abort_claiming(struct block_device *bdev, + struct block_device *whole, void *holder); +extern void blkdev_put(struct block_device *bdev, fmode_t mode); + + +extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +extern void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk); +# 2688 "./include/linux/fs.h" +extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); +extern int register_chrdev_region(dev_t, unsigned, const char *); +extern int __register_chrdev(unsigned int major, unsigned int baseminor, + unsigned int count, const char *name, + const struct file_operations *fops); +extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, + unsigned int count, const char *name); +extern void unregister_chrdev_region(dev_t, unsigned); +extern void chrdev_show(struct seq_file *,off_t); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_chrdev(unsigned int major, const char *name, + const struct file_operations *fops) +{ + return __register_chrdev(major, 0, 256, name, fops); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void unregister_chrdev(unsigned int major, const char *name) +{ + __unregister_chrdev(major, 0, 256, name); +} + + + + + + + +extern const char *bdevname(struct block_device *bdev, char *buffer); +extern struct block_device *lookup_bdev(const char *); +extern void blkdev_show(struct seq_file *,off_t); + + + + + +extern void init_special_inode(struct inode *, umode_t, dev_t); + + +extern void make_bad_inode(struct inode *); +extern bool is_bad_inode(struct inode *); + + +extern int revalidate_disk(struct gendisk *); +extern int check_disk_change(struct block_device *); +extern int __invalidate_device(struct block_device *, bool); + +unsigned long invalidate_mapping_pages(struct address_space *mapping, + unsigned long start, unsigned long end); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void invalidate_remote_inode(struct inode *inode) +{ + if ((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || + (((inode->i_mode) & 00170000) == 0120000)) + invalidate_mapping_pages(inode->i_mapping, 0, -1); +} +extern int invalidate_inode_pages2(struct address_space *mapping); +extern int invalidate_inode_pages2_range(struct address_space *mapping, + unsigned long start, unsigned long end); +extern int write_inode_now(struct inode *, int); +extern int filemap_fdatawrite(struct address_space *); +extern int filemap_flush(struct address_space *); +extern int filemap_fdatawait_keep_errors(struct address_space *mapping); +extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, + loff_t lend); +extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, + loff_t start_byte, loff_t end_byte); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int 
filemap_fdatawait(struct address_space *mapping) +{ + return filemap_fdatawait_range(mapping, 0, ((long long)(~0ULL >> 1))); +} + +extern bool filemap_range_has_page(struct address_space *, loff_t lstart, + loff_t lend); +extern int filemap_write_and_wait_range(struct address_space *mapping, + loff_t lstart, loff_t lend); +extern int __filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end, int sync_mode); +extern int filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end); +extern int filemap_check_errors(struct address_space *mapping); +extern void __filemap_set_wb_err(struct address_space *mapping, int err); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int filemap_write_and_wait(struct address_space *mapping) +{ + return filemap_write_and_wait_range(mapping, 0, ((long long)(~0ULL >> 1))); +} + +extern int __attribute__((__warn_unused_result__)) file_fdatawait_range(struct file *file, loff_t lstart, + loff_t lend); +extern int __attribute__((__warn_unused_result__)) file_check_and_advance_wb_err(struct file *file); +extern int __attribute__((__warn_unused_result__)) file_write_and_wait_range(struct file *file, + loff_t start, loff_t end); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int file_write_and_wait(struct file *file) +{ + return file_write_and_wait_range(file, 0, ((long long)(~0ULL >> 1))); +} +# 2801 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void filemap_set_wb_err(struct address_space *mapping, int err) +{ + + if (__builtin_expect(!!(err), 0)) + __filemap_set_wb_err(mapping, err); +} +# 2818 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int filemap_check_wb_err(struct address_space *mapping, + errseq_t since) +{ + return errseq_check(&mapping->wb_err, since); +} +# 2831 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) errseq_t filemap_sample_wb_err(struct address_space *mapping) +{ + return errseq_sample(&mapping->wb_err); +} +# 2843 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) errseq_t file_sample_sb_err(struct file *file) +{ + return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int filemap_nr_thps(struct address_space *mapping) +{ + + return atomic_read(&mapping->nr_thps); + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void filemap_nr_thps_inc(struct address_space *mapping) +{ + + atomic_inc(&mapping->nr_thps); + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void filemap_nr_thps_dec(struct address_space *mapping) +{ + + atomic_dec(&mapping->nr_thps); + + + +} + +extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, + int datasync); +extern int vfs_fsync(struct file *file, int datasync); + +extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, + unsigned int flags); + + + + + + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) +{ + if (iocb->ki_flags & (1 << 4)) { + int ret = vfs_fsync_range(iocb->ki_filp, + iocb->ki_pos - count, iocb->ki_pos - 1, + (iocb->ki_flags & (1 << 5)) ? 0 : 1); + if (ret) + return ret; + } + + return count; +} + +extern void emergency_sync(void); +extern void emergency_remount(void); + + +extern int bmap(struct inode *inode, sector_t *block); + + + + + + + +extern int notify_change(struct dentry *, struct iattr *, struct inode **); +extern int inode_permission(struct inode *, int); +extern int generic_permission(struct inode *, int); +extern int __check_sticky(struct inode *dir, struct inode *inode); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool execute_ok(struct inode *inode) +{ + return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void file_start_write(struct file *file) +{ + if (!(((file_inode(file)->i_mode) & 00170000) == 0100000)) + return; + __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool file_start_write_trylock(struct file *file) +{ + if (!(((file_inode(file)->i_mode) & 00170000) == 0100000)) + return true; + return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void file_end_write(struct file *file) +{ + if (!(((file_inode(file)->i_mode) & 00170000) == 0100000)) + return; + __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); +} +# 2959 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_write_access(struct inode *inode) +{ + return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -26; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int deny_write_access(struct file *file) +{ + struct inode *inode = file_inode(file); + return atomic_dec_unless_positive(&inode->i_writecount) ? 
0 : -26; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_write_access(struct inode * inode) +{ + atomic_dec(&inode->i_writecount); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void allow_write_access(struct file *file) +{ + if (file) + atomic_inc(&file_inode(file)->i_writecount); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool inode_is_open_for_write(const struct inode *inode) +{ + return atomic_read(&inode->i_writecount) > 0; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_readcount_dec(struct inode *inode) +{ + do { if (__builtin_expect(!!(!atomic_read(&inode->i_readcount)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (897)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/fs.h"), "i" (2985), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (898)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + atomic_dec(&inode->i_readcount); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void i_readcount_inc(struct inode *inode) +{ + atomic_inc(&inode->i_readcount); +} +# 3002 "./include/linux/fs.h" +extern int do_pipe_flags(int *, int); +# 3019 "./include/linux/fs.h" +enum kernel_read_file_id { + READING_UNKNOWN, READING_FIRMWARE, READING_FIRMWARE_PREALLOC_BUFFER, READING_FIRMWARE_EFI_EMBEDDED, READING_MODULE, READING_KEXEC_IMAGE, READING_KEXEC_INITRAMFS, READING_POLICY, READING_X509_CERTIFICATE, READING_MAX_ID, +}; + +static const char * const kernel_read_file_str[] = { + "unknown", "firmware", "firmware", "firmware", "kernel-module", "kexec-image", "kexec-initramfs", "security-policy", "x509-certificate", "", +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *kernel_read_file_id_str(enum kernel_read_file_id id) +{ + if ((unsigned)id >= READING_MAX_ID) + return kernel_read_file_str[READING_UNKNOWN]; + + return kernel_read_file_str[id]; +} + +extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_path(const char *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, + enum kernel_read_file_id); +extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); +extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); +extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); +extern struct file * open_exec(const char *); + + +extern bool is_subdir(struct dentry *, struct dentry *); +extern bool path_is_under(const struct path *, 
const struct path *); + +extern char *file_path(struct file *, char *, int); + + + + +extern loff_t default_llseek(struct file *file, loff_t offset, int whence); + +extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence); + +extern int inode_init_always(struct super_block *, struct inode *); +extern void inode_init_once(struct inode *); +extern void address_space_init_once(struct address_space *mapping); +extern struct inode * igrab(struct inode *); +extern ino_t iunique(struct super_block *, ino_t); +extern int inode_needs_sync(struct inode *inode); +extern int generic_delete_inode(struct inode *inode); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int generic_drop_inode(struct inode *inode) +{ + return !inode->i_nlink || inode_unhashed(inode) || + (inode->i_state & (1 << 16)); +} +extern void d_mark_dontcache(struct inode *inode); + +extern struct inode *ilookup5_nowait(struct super_block *sb, + unsigned long hashval, int (*test)(struct inode *, void *), + void *data); +extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, + int (*test)(struct inode *, void *), void *data); +extern struct inode *ilookup(struct super_block *sb, unsigned long ino); + +extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval, + int (*test)(struct inode *, void *), + int (*set)(struct inode *, void *), + void *data); +extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); +extern struct inode * iget_locked(struct super_block *, unsigned long); +extern struct inode *find_inode_nowait(struct super_block *, + unsigned long, + int (*match)(struct inode *, + unsigned long, void *), + void *data); +extern struct inode *find_inode_rcu(struct super_block *, unsigned long, + int (*)(struct inode *, void *), void *); +extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long); +extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); +extern int insert_inode_locked(struct inode *); + +extern void lockdep_annotate_inode_mutex_key(struct inode *inode); + + + +extern void unlock_new_inode(struct inode *); +extern void discard_new_inode(struct inode *); +extern unsigned int get_next_ino(void); +extern void evict_inodes(struct super_block *sb); + +extern void __iget(struct inode * inode); +extern void iget_failed(struct inode *); +extern void clear_inode(struct inode *); +extern void __destroy_inode(struct inode *); +extern struct inode *new_inode_pseudo(struct super_block *sb); +extern struct inode *new_inode(struct super_block *sb); +extern void free_inode_nonrcu(struct inode *inode); +extern int should_remove_suid(struct dentry *); +extern int file_remove_privs(struct file *); + +extern void __insert_inode_hash(struct inode *, unsigned long hashval); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insert_inode_hash(struct inode *inode) +{ + __insert_inode_hash(inode, inode->i_ino); +} + +extern void __remove_inode_hash(struct inode *); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void remove_inode_hash(struct inode *inode) +{ + if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash)) + __remove_inode_hash(inode); +} + +extern void inode_sb_list_add(struct inode *inode); + + +extern 
int bdev_read_only(struct block_device *); + +extern int set_blocksize(struct block_device *, int); +extern int sb_set_blocksize(struct super_block *, int); +extern int sb_min_blocksize(struct super_block *, int); + +extern int generic_file_mmap(struct file *, struct vm_area_struct *); +extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); +extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); +extern int generic_remap_checks(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *count, unsigned int remap_flags); +extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); +extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + size_t *count, unsigned int flags); +extern ssize_t generic_file_buffered_read(struct kiocb *iocb, + struct iov_iter *to, ssize_t already_read); +extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); +extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); +extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); +extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); +extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); + +ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, + rwf_t flags); +ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, + rwf_t flags); +ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb, + struct iov_iter *iter); +ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, + struct iov_iter *iter); + + +extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); +extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); +extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, + int datasync); +extern void block_sync_page(struct page *page); + + +extern ssize_t generic_file_splice_read(struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); +extern ssize_t iter_file_splice_write(struct pipe_inode_info *, + struct file *, loff_t *, size_t, unsigned int); +extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, + struct file *out, loff_t *, size_t len, unsigned int flags); +extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + loff_t *opos, size_t len, unsigned int flags); + + +extern void +file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); +extern loff_t noop_llseek(struct file *file, loff_t offset, int whence); +extern loff_t no_llseek(struct file *file, loff_t offset, int whence); +extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize); +extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence); +extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, + int whence, loff_t maxsize, loff_t eof); +extern loff_t fixed_size_llseek(struct file *file, loff_t offset, + int whence, loff_t size); +extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); +extern loff_t no_seek_end_llseek(struct file *, loff_t, int); +extern int generic_file_open(struct inode * inode, struct file * filp); +extern int nonseekable_open(struct inode * inode, struct file * filp); +extern int stream_open(struct inode * inode, struct file * filp); + + +typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, + loff_t file_offset); + 
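+/*
+ * Editorial note, not part of the upstream header: the sb_start_write() /
+ * sb_end_write() inlines declared earlier in this header form a strict
+ * acquire/release pair around s_writers freeze protection.  The sketch
+ * below is hypothetical and is kept under #if 0 so that this preprocessed
+ * fixture compiles exactly as before; demo_write_section() does not exist
+ * in the kernel.
+ */
+#if 0
+static void demo_write_section(struct super_block *sb)
+{
+	sb_start_write(sb);	/* blocks while the fs is frozen at SB_FREEZE_WRITE */
+	/* ... perform the write ... */
+	sb_end_write(sb);	/* must balance sb_start_write() on every path */
+}
+#endif
+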
+enum { + + DIO_LOCKING = 0x01, + + + DIO_SKIP_HOLES = 0x02, +}; + +void dio_end_io(struct bio *bio); + +ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, struct iov_iter *iter, + get_block_t get_block, + dio_iodone_t end_io, dio_submit_t submit_io, + int flags); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ssize_t blockdev_direct_IO(struct kiocb *iocb, + struct inode *inode, + struct iov_iter *iter, + get_block_t get_block) +{ + return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, + get_block, ((void *)0), ((void *)0), DIO_LOCKING | DIO_SKIP_HOLES); +} + + +void inode_dio_wait(struct inode *inode); +# 3240 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_dio_begin(struct inode *inode) +{ + atomic_inc(&inode->i_dio_count); +} +# 3252 "./include/linux/fs.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_dio_end(struct inode *inode) +{ + if (atomic_dec_and_test(&inode->i_dio_count)) + wake_up_bit(&inode->i_state, 9); +} + + + + +void dio_warn_stale_pagecache(struct file *filp); + +extern void inode_set_flags(struct inode *inode, unsigned int flags, + unsigned int mask); + +extern const struct file_operations generic_ro_fops; + + + +extern int readlink_copy(char *, int, const char *); +extern int page_readlink(struct dentry *, char *, int); +extern const char *page_get_link(struct dentry *, struct inode *, + struct delayed_call *); +extern void page_put_link(void *); +extern int __page_symlink(struct inode *inode, const char *symname, int len, + int nofs); +extern int page_symlink(struct inode *inode, const char *symname, int len); +extern const struct inode_operations page_symlink_inode_operations; +extern void kfree_link(void *); +extern void generic_fillattr(struct inode *, struct kstat *); +extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); +extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); +void __inode_add_bytes(struct inode *inode, loff_t bytes); +void inode_add_bytes(struct inode *inode, loff_t bytes); +void __inode_sub_bytes(struct inode *inode, loff_t bytes); +void inode_sub_bytes(struct inode *inode, loff_t bytes); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) loff_t __inode_get_bytes(struct inode *inode) +{ + return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes; +} +loff_t inode_get_bytes(struct inode *inode); +void inode_set_bytes(struct inode *inode, loff_t bytes); +const char *simple_get_link(struct dentry *, struct inode *, + struct delayed_call *); +extern const struct inode_operations simple_symlink_inode_operations; + +extern int iterate_dir(struct file *, struct dir_context *); + +extern int vfs_statx(int, const char *, int, struct kstat *, u32); +extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vfs_stat(const char *filename, struct kstat *stat) +{ + return vfs_statx(-100, filename, 0x800, + stat, 0x000007ffU); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vfs_lstat(const char *name, struct kstat *stat) +{ + 
return vfs_statx(-100, name, 0x100 | 0x800, + stat, 0x000007ffU); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vfs_fstatat(int dfd, const char *filename, + struct kstat *stat, int flags) +{ + return vfs_statx(dfd, filename, flags | 0x800, + stat, 0x000007ffU); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vfs_fstat(int fd, struct kstat *stat) +{ + return vfs_statx_fd(fd, stat, 0x000007ffU, 0); +} + + +extern const char *vfs_get_link(struct dentry *, struct delayed_call *); +extern int vfs_readlink(struct dentry *, char *, int); + +extern struct file_system_type *get_filesystem(struct file_system_type *fs); +extern void put_filesystem(struct file_system_type *fs); +extern struct file_system_type *get_fs_type(const char *name); +extern struct super_block *get_super(struct block_device *); +extern struct super_block *get_super_thawed(struct block_device *); +extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev); +extern struct super_block *get_active_super(struct block_device *bdev); +extern void drop_super(struct super_block *sb); +extern void drop_super_exclusive(struct super_block *sb); +extern void iterate_supers(void (*)(struct super_block *, void *), void *); +extern void iterate_supers_type(struct file_system_type *, + void (*)(struct super_block *, void *), void *); + +extern int dcache_dir_open(struct inode *, struct file *); +extern int dcache_dir_close(struct inode *, struct file *); +extern loff_t dcache_dir_lseek(struct file *, loff_t, int); +extern int dcache_readdir(struct file *, struct dir_context *); +extern int simple_setattr(struct dentry *, struct iattr *); +extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int); +extern int simple_statfs(struct dentry *, struct kstatfs *); +extern int simple_open(struct inode *inode, struct file *file); +extern int simple_link(struct dentry *, struct inode *, struct dentry *); +extern int simple_unlink(struct inode *, struct dentry *); +extern int simple_rmdir(struct inode *, struct dentry *); +extern int simple_rename(struct inode *, struct dentry *, + struct inode *, struct dentry *, unsigned int); +extern void simple_recursive_removal(struct dentry *, + void (*callback)(struct dentry *)); +extern int noop_fsync(struct file *, loff_t, loff_t, int); +extern int noop_set_page_dirty(struct page *page); +extern void noop_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); +extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); +extern int simple_empty(struct dentry *); +extern int simple_readpage(struct file *file, struct page *page); +extern int simple_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); +extern int simple_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); +extern int always_delete_dentry(const struct dentry *); +extern struct inode *alloc_anon_inode(struct super_block *); +extern int simple_nosetlease(struct file *, long, struct file_lock **, void **); +extern const struct dentry_operations simple_dentry_operations; + +extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); +extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *); +extern const 
struct file_operations simple_dir_operations; +extern const struct inode_operations simple_dir_inode_operations; +extern void make_empty_dir_inode(struct inode *inode); +extern bool is_empty_dir_inode(struct inode *inode); +struct tree_descr { const char *name; const struct file_operations *ops; int mode; }; +struct dentry *d_alloc_name(struct dentry *, const char *); +extern int simple_fill_super(struct super_block *, unsigned long, + const struct tree_descr *); +extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); +extern void simple_release_fs(struct vfsmount **mount, int *count); + +extern ssize_t simple_read_from_buffer(void *to, size_t count, + loff_t *ppos, const void *from, size_t available); +extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void *from, size_t count); + +extern int __generic_file_fsync(struct file *, loff_t, loff_t, int); +extern int generic_file_fsync(struct file *, loff_t, loff_t, int); + +extern int generic_check_addressable(unsigned, u64); + + +extern int buffer_migrate_page(struct address_space *, + struct page *, struct page *, + enum migrate_mode); +extern int buffer_migrate_page_norefs(struct address_space *, + struct page *, struct page *, + enum migrate_mode); + + + + + +extern int setattr_prepare(struct dentry *, struct iattr *); +extern int inode_newsize_ok(const struct inode *, loff_t offset); +extern void setattr_copy(struct inode *inode, const struct iattr *attr); + +extern int file_update_time(struct file *file); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool vma_is_dax(const struct vm_area_struct *vma) +{ + return vma->vm_file && ((vma->vm_file->f_mapping->host)->i_flags & 8192); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool vma_is_fsdax(struct vm_area_struct *vma) +{ + struct inode *inode; + + if (!vma->vm_file) + return false; + if (!vma_is_dax(vma)) + return false; + inode = file_inode(vma->vm_file); + if ((((inode->i_mode) & 00170000) == 0020000)) + return false; + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int iocb_flags(struct file *file) +{ + int res = 0; + if (file->f_flags & 00002000) + res |= (1 << 1); + if (file->f_flags & 00040000) + res |= (1 << 2); + if ((file->f_flags & 00010000) || (((file->f_mapping->host)->i_sb->s_flags & (16)) || ((file->f_mapping->host)->i_flags & 1))) + res |= (1 << 4); + if (file->f_flags & 04000000) + res |= (1 << 5); + return res; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) +{ + if (__builtin_expect(!!(flags & ~((( __kernel_rwf_t)0x00000001) | (( __kernel_rwf_t)0x00000002) | (( __kernel_rwf_t)0x00000004) | (( __kernel_rwf_t)0x00000008) | (( __kernel_rwf_t)0x00000010))), 0)) + return -95; + + if (flags & (( __kernel_rwf_t)0x00000008)) { + if (!(ki->ki_filp->f_mode & (( fmode_t)0x8000000))) + return -95; + ki->ki_flags |= (1 << 7); + } + if (flags & (( __kernel_rwf_t)0x00000001)) + ki->ki_flags |= (1 << 3); + if (flags & (( __kernel_rwf_t)0x00000002)) + ki->ki_flags |= (1 << 4); + if (flags & (( __kernel_rwf_t)0x00000004)) + ki->ki_flags |= ((1 << 4) | (1 << 5)); + if (flags & (( __kernel_rwf_t)0x00000010)) + ki->ki_flags |= (1 << 1); + return 0; +} + +static inline 
[~60 KB of machine-generated analysis input elided. This span is the tail of a single preprocessed (post-cpp) Linux kernel translation unit: every macro is already expanded, and the diff's `+` line breaks were lost in extraction, fusing hundreds of diff lines into a few physical ones. The surviving `# <line> "<file>"` markers show that it covers the inlined bodies of ./include/linux/fs.h (parent_ino(), the dir_emit*/dir_relax* helpers, simple_attr_*), ./include/uapi/linux/aio_abi.h (struct iocb, struct io_event), ./include/linux/tracepoint.h, ./include/linux/mmap_lock.h (the mmap_read_*/mmap_write_* rwsem wrappers), ./include/linux/percpu-refcount.h (the percpu_ref get/put/tryget family with expanded per-CPU asm), ./include/linux/mm.h and its includes (page_ext.h, page_ref.h, memremap.h, ioport.h, overflow.h), and ./arch/x86/include/asm/pgtable.h with its fpu/user headers. Nothing in the span is hand-written; it is verbatim compiler-preprocessed kernel code, of the kind fed to the analysis as input.]
&& (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(18) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+ 4), (unsigned long *)((&boot_cpu_data)->x86_capability)))) + return rdpkru(); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_pkru(u32 pkru) +{ + struct pkru_state *pk; + + if (!(__builtin_constant_p((16*32+ 4)) && ( ((((16*32+ 4))>>5)==(0) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+ 4))>>5)==(1) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+ 4))>>5)==(2) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(3) && (1UL<<(((16*32+ 4))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+ 4))>>5)==(4) && (1UL<<(((16*32+ 4))&31) & (0) )) || ((((16*32+ 4))>>5)==(5) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(6) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(7) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(8) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(9) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(10) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(11) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(12) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(13) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(14) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(15) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(16) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(17) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((((16*32+ 4))>>5)==(18) && (1UL<<(((16*32+ 4))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit((16*32+ 4), (unsigned long *)((&boot_cpu_data)->x86_capability)))) + return; + + pk = get_xsave_addr(&get_current()->thread.fpu.state.xsave, XFEATURE_PKRU); + + + + + + + fpregs_lock(); + if (pk) + pk->pkru = pkru; + __write_pkru(pkru); + fpregs_unlock(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_young(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 5); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_dirty(pmd_t pmd) +{ + return pmd_flags(pmd) & (((pteval_t)(1)) << 6); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_young(pmd_t pmd) +{ + return pmd_flags(pmd) & (((pteval_t)(1)) << 5); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_dirty(pud_t pud) +{ + return pud_flags(pud) & (((pteval_t)(1)) << 6); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_young(pud_t pud) +{ + return pud_flags(pud) & (((pteval_t)(1)) << 5); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_write(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_huge(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 7); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_global(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 8); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_exec(pte_t pte) +{ + return !(pte_flags(pte) & (((pteval_t)(1)) << 63)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_special(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 9); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 protnone_mask(u64 val); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pte_pfn(pte_t pte) +{ + phys_addr_t pfn = pte_val(pte); + pfn ^= protnone_mask(pfn); + return (pfn & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask))) >> 12; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pmd_pfn(pmd_t pmd) +{ + phys_addr_t pfn = pmd_val(pmd); + pfn ^= protnone_mask(pfn); + return (pfn & pmd_pfn_mask(pmd)) >> 12; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pud_pfn(pud_t pud) +{ + phys_addr_t pfn = pud_val(pud); + pfn ^= protnone_mask(pfn); + return (pfn & pud_pfn_mask(pud)) >> 12; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long p4d_pfn(p4d_t p4d) +{ + return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> 12; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) unsigned long pgd_pfn(pgd_t pgd) +{ + return (pgd_val(pgd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask))) >> 12; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_large(p4d_t p4d) +{ + + return 0; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_large(pmd_t pte) +{ + return pmd_flags(pte) & (((pteval_t)(1)) << 7); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_trans_huge(pmd_t pmd) +{ + return (pmd_val(pmd) & ((((pteval_t)(1)) << 7)|(((u64)(1)) << 58))) == (((pteval_t)(1)) << 7); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_trans_huge(pud_t pud) +{ + return (pud_val(pud) & ((((pteval_t)(1)) << 7)|(((u64)(1)) << 58))) == (((pteval_t)(1)) << 7); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int has_transparent_hugepage(void) +{ + return (__builtin_constant_p(( 0*32+ 3)) && ( (((( 0*32+ 3))>>5)==(0) && (1UL<<((( 0*32+ 3))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 0*32+ 3))>>5)==(1) && (1UL<<((( 0*32+ 3))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 0*32+ 3))>>5)==(2) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(3) && (1UL<<((( 0*32+ 3))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 0*32+ 3))>>5)==(4) && (1UL<<((( 0*32+ 3))&31) & (0) )) || (((( 0*32+ 3))>>5)==(5) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(6) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(7) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(8) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(9) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(10) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(11) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(12) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(13) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(14) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(15) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(16) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(17) && (1UL<<((( 0*32+ 3))&31) & 0 )) || (((( 0*32+ 3))>>5)==(18) && (1UL<<((( 0*32+ 3))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit(( 0*32+ 3), (unsigned long *)((&boot_cpu_data)->x86_capability))); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_devmap(pmd_t pmd) +{ + return !!(pmd_val(pmd) & (((u64)(1)) << 58)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_devmap(pud_t pud) +{ + return !!(pud_val(pud) & (((u64)(1)) << 58)); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_devmap(pgd_t pgd) +{ + return 0; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_set_flags(pte_t pte, pteval_t set) +{ + pteval_t v = native_pte_val(pte); + + return native_make_pte(v | set); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_clear_flags(pte_t pte, pteval_t clear) +{ + pteval_t v = native_pte_val(pte); + + return native_make_pte(v & ~clear); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_uffd_wp(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 10); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkuffd_wp(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 10)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_clear_uffd_wp(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 10)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkclean(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 6)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkold(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 5)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_wrprotect(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkexec(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 63)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkdirty(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkyoung(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 5)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkwrite(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkhuge(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 7)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) pte_t pte_clrhuge(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 7)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkglobal(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 8)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_clrglobal(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 8)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkspecial(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 9)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mkdevmap(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 9)|(((u64)(1)) << 58)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) +{ + pmdval_t v = native_pmd_val(pmd); + + return native_make_pmd(v | set); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) +{ + pmdval_t v = native_pmd_val(pmd); + + return native_make_pmd(v & ~clear); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_uffd_wp(pmd_t pmd) +{ + return pmd_flags(pmd) & (((pteval_t)(1)) << 10); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkuffd_wp(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((pteval_t)(1)) << 10)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_clear_uffd_wp(pmd_t pmd) +{ + return pmd_clear_flags(pmd, (((pteval_t)(1)) << 10)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkold(pmd_t pmd) +{ + return pmd_clear_flags(pmd, (((pteval_t)(1)) << 5)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkclean(pmd_t pmd) +{ + return pmd_clear_flags(pmd, (((pteval_t)(1)) << 6)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_wrprotect(pmd_t pmd) +{ + return pmd_clear_flags(pmd, (((pteval_t)(1)) << 1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkdirty(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkdevmap(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((u64)(1)) << 58)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkhuge(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((pteval_t)(1)) << 7)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkyoung(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((pteval_t)(1)) << 5)); 
+} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkwrite(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((pteval_t)(1)) << 1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_set_flags(pud_t pud, pudval_t set) +{ + pudval_t v = native_pud_val(pud); + + return native_make_pud(v | set); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_clear_flags(pud_t pud, pudval_t clear) +{ + pudval_t v = native_pud_val(pud); + + return native_make_pud(v & ~clear); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkold(pud_t pud) +{ + return pud_clear_flags(pud, (((pteval_t)(1)) << 5)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkclean(pud_t pud) +{ + return pud_clear_flags(pud, (((pteval_t)(1)) << 6)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_wrprotect(pud_t pud) +{ + return pud_clear_flags(pud, (((pteval_t)(1)) << 1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkdirty(pud_t pud) +{ + return pud_set_flags(pud, (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkdevmap(pud_t pud) +{ + return pud_set_flags(pud, (((u64)(1)) << 58)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkhuge(pud_t pud) +{ + return pud_set_flags(pud, (((pteval_t)(1)) << 7)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkyoung(pud_t pud) +{ + return pud_set_flags(pud, (((pteval_t)(1)) << 5)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mkwrite(pud_t pud) +{ + return pud_set_flags(pud, (((pteval_t)(1)) << 1)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_soft_dirty(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 11); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_soft_dirty(pmd_t pmd) +{ + return pmd_flags(pmd) & (((pteval_t)(1)) << 11); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_soft_dirty(pud_t pud) +{ + return pud_flags(pud) & (((pteval_t)(1)) << 11); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_mksoft_dirty(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 11)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((pteval_t)(1)) << 11)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_mksoft_dirty(pud_t pud) +{ 
+ return pud_set_flags(pud, (((pteval_t)(1)) << 11)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_clear_soft_dirty(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 11)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_clear_soft_dirty(pmd_t pmd) +{ + return pmd_clear_flags(pmd, (((pteval_t)(1)) << 11)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pud_clear_soft_dirty(pud_t pud) +{ + return pud_clear_flags(pud, (((pteval_t)(1)) << 11)); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprotval_t massage_pgprot(pgprot_t pgprot) +{ + pgprotval_t protval = ((pgprot).pgprot); + + if (protval & (((pteval_t)(1)) << 0)) + protval &= __supported_pte_mask; + + return protval; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprotval_t check_pgprot(pgprot_t pgprot) +{ + pgprotval_t massaged_val = massage_pgprot(pgprot); + + + + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(((pgprot).pgprot) != massaged_val); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (914)); }); __warn_printk("attempted to set unsupported pgprot: %016llx " "bits: %016llx supported: %016llx\n", (u64)((pgprot).pgprot), (u64)((pgprot).pgprot) ^ massaged_val, (u64)__supported_pte_mask); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (915)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("arch/x86/include/asm/pgtable.h"), "i" (593), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (916)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (917)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (918)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }) + + + + + ; + + + return massaged_val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) +{ + phys_addr_t pfn = (phys_addr_t)page_nr << 12; + pfn ^= protnone_mask(((pgprot).pgprot)); + pfn &= ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)); + return __pte(pfn | check_pgprot(pgprot)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) +{ + phys_addr_t pfn = 
(phys_addr_t)page_nr << 12; + pfn ^= protnone_mask(((pgprot).pgprot)); + pfn &= (((signed long)(~(((1UL) << 21)-1))) & physical_mask); + return __pmd(pfn | check_pgprot(pgprot)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) +{ + phys_addr_t pfn = (phys_addr_t)page_nr << 12; + pfn ^= protnone_mask(((pgprot).pgprot)); + pfn &= (((signed long)(~(((1UL) << 30)-1))) & physical_mask); + return __pud(pfn | check_pgprot(pgprot)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_mkinvalid(pmd_t pmd) +{ + return pfn_pmd(pmd_pfn(pmd), + ((pgprot_t) { (pmd_flags(pmd) & ~((((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 8))) } )); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pteval_t val = pte_val(pte), oldval = val; + + + + + + val &= (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11) | (((u64)(1)) << 58) | (((pteval_t)(sme_me_mask))) | (((pteval_t)(1)) << 10)); + val |= check_pgprot(newprot) & ~(((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11) | (((u64)(1)) << 58) | (((pteval_t)(sme_me_mask))) | (((pteval_t)(1)) << 10)); + val = flip_protnone_guard(oldval, val, ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask))); + return __pte(val); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmdval_t val = pmd_val(pmd), oldval = val; + + val &= ((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11) | (((u64)(1)) << 58) | (((pteval_t)(sme_me_mask))) | (((pteval_t)(1)) << 10)) | (((pteval_t)(1)) << 7)); + val |= check_pgprot(newprot) & ~((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11) | (((u64)(1)) << 58) | (((pteval_t)(sme_me_mask))) | (((pteval_t)(1)) << 10)) | (((pteval_t)(1)) << 7)); + val = flip_protnone_guard(oldval, val, (((signed long)(~(((1UL) << 21)-1))) & physical_mask)); + return __pmd(val); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +{ + pgprotval_t preservebits = ((oldprot).pgprot) & (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11) | (((u64)(1)) << 58) | (((pteval_t)(sme_me_mask))) | (((pteval_t)(1)) << 
10)); + pgprotval_t addbits = ((newprot).pgprot) & ~(((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 11) | (((u64)(1)) << 58) | (((pteval_t)(sme_me_mask))) | (((pteval_t)(1)) << 10)); + return ((pgprot_t) { (preservebits | addbits) } ); +} +# 679 "./arch/x86/include/asm/pgtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgprot_t arch_filter_pgprot(pgprot_t prot) +{ + return ((pgprot_t) { (massage_pgprot(prot)) } ); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_new_memtype_allowed(u64 paddr, unsigned long size, + enum page_cache_mode pcm, + enum page_cache_mode new_pcm) +{ + + + + if (x86_platform.is_untracked_pat_range(paddr, paddr + size)) + return 1; +# 702 "./arch/x86/include/asm/pgtable.h" + if ((pcm == _PAGE_CACHE_MODE_UC_MINUS && + new_pcm == _PAGE_CACHE_MODE_WB) || + (pcm == _PAGE_CACHE_MODE_WC && + new_pcm == _PAGE_CACHE_MODE_WB) || + (pcm == _PAGE_CACHE_MODE_WT && + new_pcm == _PAGE_CACHE_MODE_WB) || + (pcm == _PAGE_CACHE_MODE_WT && + new_pcm == _PAGE_CACHE_MODE_WC)) { + return 0; + } + + return 1; +} + +pmd_t *populate_extra_pmd(unsigned long vaddr); +pte_t *populate_extra_pte(unsigned long vaddr); + + +pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd) +{ + if (!( __builtin_constant_p((__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 7*32+11), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? 
(__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 7*32+11), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has(( 7*32+11)) )) + return pgd; + return __pti_set_user_pgtbl(pgdp, pgd); +} +# 746 "./arch/x86/include/asm/pgtable.h" +# 1 "./arch/x86/include/asm/pgtable_64.h" 1 +# 19 "./arch/x86/include/asm/pgtable_64.h" +extern p4d_t level4_kernel_pgt[512]; +extern p4d_t level4_ident_pgt[512]; +extern pud_t level3_kernel_pgt[512]; +extern pud_t level3_ident_pgt[512]; +extern pmd_t level2_kernel_pgt[512]; +extern pmd_t level2_fixmap_pgt[512]; +extern pmd_t level2_ident_pgt[512]; +extern pte_t level1_fixmap_pgt[512 * 2]; +extern pgd_t init_top_pgt[]; + + + +extern void paging_init(void); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sync_initial_page_table(void) { } +# 54 "./arch/x86/include/asm/pgtable_64.h" +struct mm_struct; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mm_p4d_folded(struct mm_struct *mm) +{ + return !(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || 
((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? 
(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) )); +} + +void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte); +void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pte(pte_t *ptep, pte_t pte) +{ + do { do { extern void __compiletime_assert_919(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*ptep) == sizeof(char) || sizeof(*ptep) == sizeof(short) || sizeof(*ptep) == sizeof(int) || sizeof(*ptep) == sizeof(long)) || sizeof(*ptep) == sizeof(long long))) __compiletime_assert_919(); } while (0); do { *(volatile typeof(*ptep) *)&(*ptep) = (pte); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + native_set_pte(ptep, native_make_pte(0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pte_atomic(pte_t *ptep, pte_t pte) +{ + native_set_pte(ptep, pte); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + do { do { extern void __compiletime_assert_920(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*pmdp) == sizeof(char) || sizeof(*pmdp) == sizeof(short) || sizeof(*pmdp) == sizeof(int) || sizeof(*pmdp) == sizeof(long)) || sizeof(*pmdp) == sizeof(long long))) __compiletime_assert_920(); } while (0); do { *(volatile typeof(*pmdp) *)&(*pmdp) = (pmd); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void native_pmd_clear(pmd_t *pmd) +{ + native_set_pmd(pmd, native_make_pmd(0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t native_ptep_get_and_clear(pte_t *xp) +{ + + return native_make_pte(({ typeof(&xp->pte) __ai_ptr = (&xp->pte); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = ((0)); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); })); + + + + + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t native_pmdp_get_and_clear(pmd_t *xp) +{ + + return native_make_pmd(({ typeof(&xp->pmd) __ai_ptr = (&xp->pmd); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = ((0)); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); })); + + + + + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pud(pud_t *pudp, pud_t pud) +{ + do { do { extern void __compiletime_assert_921(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*pudp) == sizeof(char) || sizeof(*pudp) == sizeof(short) || sizeof(*pudp) == sizeof(int) || sizeof(*pudp) == sizeof(long)) || sizeof(*pudp) == sizeof(long long))) __compiletime_assert_921(); } while (0); do { *(volatile typeof(*pudp) *)&(*pudp) = (pud); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_pud_clear(pud_t *pud) +{ + native_set_pud(pud, native_make_pud(0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t native_pudp_get_and_clear(pud_t *xp) +{ + + return native_make_pud(({ typeof(&xp->pud) __ai_ptr = (&xp->pud); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = ((0)); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); })); +# 140 
"./arch/x86/include/asm/pgtable_64.h" +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + pgd_t pgd; + + if ((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? 
(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) )) || !1) { + do { do { extern void __compiletime_assert_922(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*p4dp) == sizeof(char) || sizeof(*p4dp) == sizeof(short) || sizeof(*p4dp) == sizeof(int) || sizeof(*p4dp) == sizeof(long)) || sizeof(*p4dp) == sizeof(long long))) __compiletime_assert_922(); } while (0); do { *(volatile typeof(*p4dp) *)&(*p4dp) = (p4d); } while (0); } while (0); + return; + } + + pgd = native_make_pgd(native_p4d_val(p4d)); + pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd); + do { do { extern void __compiletime_assert_923(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*p4dp) == sizeof(char) || sizeof(*p4dp) == sizeof(short) || sizeof(*p4dp) == sizeof(int) || sizeof(*p4dp) == sizeof(long)) || sizeof(*p4dp) == sizeof(long long))) __compiletime_assert_923(); } while (0); do { *(volatile typeof(*p4dp) *)&(*p4dp) = (native_make_p4d(native_pgd_val(pgd))); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_p4d_clear(p4d_t *p4d) +{ + native_set_p4d(p4d, native_make_p4d(0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pgd(pgd_t *pgdp, pgd_t pgd) +{ + do { do { extern void __compiletime_assert_924(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*pgdp) == sizeof(char) || sizeof(*pgdp) == sizeof(short) || sizeof(*pgdp) == sizeof(int) || sizeof(*pgdp) == sizeof(long)) || sizeof(*pgdp) == sizeof(long long))) __compiletime_assert_924(); } while (0); do { *(volatile typeof(*pgdp) *)&(*pgdp) = (pti_set_user_pgtbl(pgdp, pgd)); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void native_pgd_clear(pgd_t *pgd) +{ + native_set_pgd(pgd, native_make_pgd(0)); +} + +extern void sync_global_pgds(unsigned long start, unsigned long end); +# 246 "./arch/x86/include/asm/pgtable_64.h" +extern int kern_addr_valid(unsigned long addr); +extern void cleanup_highmap(void); +# 263 "./arch/x86/include/asm/pgtable_64.h" +extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); +extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gup_fast_permitted(unsigned long start, unsigned long end) +{ + if (end >> ((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) )) ? 
56 : 47)) + return false; + return true; +} + +# 1 "./arch/x86/include/asm/pgtable-invert.h" 1 +# 16 "./arch/x86/include/asm/pgtable-invert.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __pte_needs_invert(u64 val) +{ + return val && !(val & (((pteval_t)(1)) << 0)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 protnone_mask(u64 val) +{ + return __pte_needs_invert(val) ? ~0ull : 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask) +{ + + + + + + if (__pte_needs_invert(oldval) != __pte_needs_invert(val)) + val = (val & ~mask) | (~val & mask); + return val; +} +# 275 "./arch/x86/include/asm/pgtable_64.h" 2 +# 747 "./arch/x86/include/asm/pgtable.h" 2 +# 755 "./arch/x86/include/asm/pgtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_none(pte_t pte) +{ + return !(pte.pte & ~(((((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 5)))); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_same(pte_t a, pte_t b) +{ + return a.pte == b.pte; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_present(pte_t a) +{ + return pte_flags(a) & ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_devmap(pte_t a) +{ + return (pte_flags(a) & (((u64)(1)) << 58)) == (((u64)(1)) << 58); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pte_accessible(struct mm_struct *mm, pte_t a) +{ + if (pte_flags(a) & (((pteval_t)(1)) << 0)) + return true; + + if ((pte_flags(a) & (((pteval_t)(1)) << 8)) && + mm_tlb_flush_pending(mm)) + return true; + + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_present(pmd_t pmd) +{ + + + + + + + return pmd_flags(pmd) & ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8) | (((pteval_t)(1)) << 7)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_protnone(pte_t pte) +{ + return (pte_flags(pte) & ((((pteval_t)(1)) << 8) | (((pteval_t)(1)) << 0))) + == (((pteval_t)(1)) << 8); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_protnone(pmd_t pmd) +{ + return (pmd_flags(pmd) & ((((pteval_t)(1)) << 8) | (((pteval_t)(1)) << 0))) + == (((pteval_t)(1)) << 8); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_none(pmd_t pmd) +{ + + + unsigned long val = native_pmd_val(pmd); + return (val & ~((((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 5))) == 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)((void *)((unsigned long)(pmd_val(pmd) & pmd_pfn_mask(pmd))+((unsigned long)page_offset_base))); +} +# 848 
"./arch/x86/include/asm/pgtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_bad(pmd_t pmd) +{ + return (pmd_flags(pmd) & ~(((pteval_t)(1)) << 2)) != ((((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 1)| 0|(((pteval_t)(1)) << 5)| 0|(((pteval_t)(1)) << 6)| 0| 0| (((pteval_t)(sme_me_mask)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pages_to_mb(unsigned long npg) +{ + return npg >> (20 - 12); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_none(pud_t pud) +{ + return (native_pud_val(pud) & ~(((((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 5)))) == 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_present(pud_t pud) +{ + return pud_flags(pud) & (((pteval_t)(1)) << 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long)((void *)((unsigned long)(pud_val(pud) & pud_pfn_mask(pud))+((unsigned long)page_offset_base))); +} +# 881 "./arch/x86/include/asm/pgtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_large(pud_t pud) +{ + return (pud_val(pud) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0))) == + ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_bad(pud_t pud) +{ + return (pud_flags(pud) & ~(((((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 1)| 0|(((pteval_t)(1)) << 5)| 0|(((pteval_t)(1)) << 6)| 0| 0| (((pteval_t)(sme_me_mask)))) | (((pteval_t)(1)) << 2))) != 0; +} +# 900 "./arch/x86/include/asm/pgtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_none(p4d_t p4d) +{ + return (native_p4d_val(p4d) & ~(((((pteval_t)(1)) << 6) | (((pteval_t)(1)) << 5)))) == 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_present(p4d_t p4d) +{ + return p4d_flags(p4d) & (((pteval_t)(1)) << 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long p4d_page_vaddr(p4d_t p4d) +{ + return (unsigned long)((void *)((unsigned long)(p4d_val(p4d) & p4d_pfn_mask(p4d))+((unsigned long)page_offset_base))); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int p4d_bad(p4d_t p4d) +{ + unsigned long ignore_flags = ((((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 1)| 0|(((pteval_t)(1)) << 5)| 0|(((pteval_t)(1)) << 6)| 0| 0| (((pteval_t)(sme_me_mask)))) | (((pteval_t)(1)) << 2); + + if (1) + ignore_flags |= (((pteval_t)(1)) << 63); + + return (p4d_flags(p4d) & ~ignore_flags) != 0; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long p4d_index(unsigned long address) +{ + return (address >> 39) & (ptrs_per_p4d - 1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_present(pgd_t pgd) +{ + if 
(!(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? 
(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) ))) + return 1; + return pgd_flags(pgd) & (((pteval_t)(1)) << 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long pgd_page_vaddr(pgd_t pgd) +{ + return (unsigned long)((void *)((unsigned long)((unsigned long)pgd_val(pgd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & physical_mask)))+((unsigned long)page_offset_base))); +} +# 957 "./arch/x86/include/asm/pgtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +{ + if (!(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || 
((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) ))) + return (p4d_t *)pgd; + return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_bad(pgd_t pgd) +{ + unsigned long ignore_flags = (((pteval_t)(1)) << 2); + + if (!(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? 
(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) ))) + return 0; + + if (1) + ignore_flags |= (((pteval_t)(1)) << 63); + + return (pgd_flags(pgd) & ~ignore_flags) != ((((pteval_t)(1)) << 0)|(((pteval_t)(1)) << 1)| 0|(((pteval_t)(1)) << 5)| 0|(((pteval_t)(1)) << 6)| 0| 0| (((pteval_t)(sme_me_mask)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_none(pgd_t pgd) +{ + if (!(__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) ))) + return 0; + + + + + + + return !native_pgd_val(pgd); +} +# 998 "./arch/x86/include/asm/pgtable.h" +extern int direct_gbpages; +void init_mem_mapping(void); +void early_alloc_pgt_buf(void); +extern void memblock_find_dma_reserve(void); + + + +extern pgd_t trampoline_pgd_entry; + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) poking_init(void); + +unsigned long init_memory_mapping(unsigned long start, + unsigned long end, pgprot_t prot); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t native_local_ptep_get_and_clear(pte_t *ptep) +{ + pte_t res = *ptep; + + + native_pte_clear(((void *)0), 0, ptep); + return res; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp) +{ + pmd_t res = *pmdp; + + native_pmd_clear(pmdp); + return res; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t native_local_pudp_get_and_clear(pud_t *pudp) +{ + pud_t res = *pudp; + + native_pud_clear(pudp); + return res; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void native_set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep , pte_t pte) +{ + native_set_pte(ptep, pte); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + set_pmd(pmdp, pmd); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pud) +{ + native_set_pud(pudp, pud); +} +# 1064 "./arch/x86/include/asm/pgtable.h" +struct vm_area_struct; + + +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); + + +extern int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep); + + +extern int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_t pte = native_ptep_get_and_clear(ptep); + return pte; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + int full) +{ + pte_t pte; + if (full) { + + + + + pte = native_local_ptep_get_and_clear(ptep); + } else { + pte = ptep_get_and_clear(mm, addr, ptep); + } + return pte; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + clear_bit(1, (unsigned long *)&ptep->pte); +} + + + + + + +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); +extern int 
pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); + + +extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); +extern int pudp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pud_t *pudp); + + +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_write(pmd_t pmd) +{ + return pmd_flags(pmd) & (((pteval_t)(1)) << 1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp) +{ + return native_pmdp_get_and_clear(pmdp); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long addr, pud_t *pudp) +{ + return native_pudp_get_and_clear(pudp); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + clear_bit(1, (unsigned long *)pmdp); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pud_write(pud_t pud) +{ + return pud_flags(pud) & (((pteval_t)(1)) << 1); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + if (1) { + return ({ typeof(pmdp) __ai_ptr = (pmdp); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__ (*((__ai_ptr))) __ret = ((pmd)); switch (sizeof(*((__ai_ptr)))) { case 1: asm volatile ("" "xchg" "b %b0, %1\n" : "+q" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 2: asm volatile ("" "xchg" "w %w0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 4: asm volatile ("" "xchg" "l %0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; case 8: asm volatile ("" "xchg" "q %q0, %1\n" : "+r" (__ret), "+m" (*((__ai_ptr))) : : "memory", "cc"); break; default: __xchg_wrong_size(); } __ret; }); }); + } else { + pmd_t old = *pmdp; + do { do { extern void __compiletime_assert_925(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*pmdp) == sizeof(char) || sizeof(*pmdp) == sizeof(short) || sizeof(*pmdp) == sizeof(int) || sizeof(*pmdp) == sizeof(long)) || sizeof(*pmdp) == sizeof(long long))) __compiletime_assert_925(); } while (0); do { *(volatile typeof(*pmdp) *)&(*pmdp) = (pmd); } while (0); } while (0); + return old; + } +} +# 1189 "./arch/x86/include/asm/pgtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pgdp_maps_userspace(void *__ptr) +{ + unsigned long ptr = (unsigned long)__ptr; + + return (((ptr & ~(~(((1UL) << 12)-1))) / sizeof(pgd_t)) < ((((1UL) << 12) / 2) / sizeof(pgd_t))); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_large(pgd_t pgd) { return 0; } +# 1212 "./arch/x86/include/asm/pgtable.h" +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *ptr_set_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr |= ((((1UL))) << (bit)); + return (void *)__ptr; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *ptr_clear_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr &= ~((((1UL))) << (bit)); + return (void *)__ptr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t *kernel_to_user_pgdp(pgd_t *pgdp) +{ + return ptr_set_bit(pgdp, 12); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pgd_t *user_to_kernel_pgdp(pgd_t *pgdp) +{ + return ptr_clear_bit(pgdp, 12); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t *kernel_to_user_p4dp(p4d_t *p4dp) +{ + return ptr_set_bit(p4dp, 12); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t *user_to_kernel_p4dp(p4d_t *p4dp) +{ + return ptr_clear_bit(p4dp, 12); +} +# 1258 "./arch/x86/include/asm/pgtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) +{ + memcpy(dst, src, count * sizeof(pgd_t)); + + if (!( __builtin_constant_p((__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 7*32+11), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? 
(__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 7*32+11), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has(( 7*32+11)) )) + return; + + memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src), + count * sizeof(pgd_t)); + +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_level_shift(enum pg_level level) +{ + return (12 - ( __builtin_constant_p(512) ? ( __builtin_constant_p(512) ? ( (512) < 2 ? 0 : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 
10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(512) <= 4) ? __ilog2_u32(512) : __ilog2_u64(512) )) + level * ( __builtin_constant_p(512) ? ( __builtin_constant_p(512) ? ( (512) < 2 ? 0 : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(512) <= 4) ? 
__ilog2_u32(512) : __ilog2_u64(512) ); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long page_level_size(enum pg_level level) +{ + return 1UL << page_level_shift(level); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long page_level_mask(enum pg_level level) +{ + return ~(page_level_size(level) - 1); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_mmu_cache(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_mmu_cache_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmd) +{ +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_mmu_cache_pud(struct vm_area_struct *vma, + unsigned long addr, pud_t *pud) +{ +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_swp_mksoft_dirty(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_swp_soft_dirty(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_swp_clear_soft_dirty(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 1)); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((pteval_t)(1)) << 1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_swp_soft_dirty(pmd_t pmd) +{ + return pmd_flags(pmd) & (((pteval_t)(1)) << 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) +{ + return pmd_clear_flags(pmd, (((pteval_t)(1)) << 1)); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_swp_mkuffd_wp(pte_t pte) +{ + return pte_set_flags(pte, (((pteval_t)(1)) << 2)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pte_swp_uffd_wp(pte_t pte) +{ + return pte_flags(pte) & (((pteval_t)(1)) << 2); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t pte_swp_clear_uffd_wp(pte_t pte) +{ + return pte_clear_flags(pte, (((pteval_t)(1)) << 2)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t pmd_swp_mkuffd_wp(pmd_t pmd) +{ + return pmd_set_flags(pmd, (((pteval_t)(1)) << 2)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pmd_swp_uffd_wp(pmd_t pmd) +{ + return pmd_flags(pmd) & (((pteval_t)(1)) << 2); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
[... diff hunk continues: verbatim preprocessor output of a Linux kernel translation unit used as analysis input — inline page-table helpers (pte/pmd/pud accessors and PKRU permission checks), followed by the expansions of ./include/linux/pgtable.h, ./include/linux/mm.h, ./include/linux/huge_mm.h, ./include/linux/vm_event_item.h, and ./include/linux/vmstat.h ...]
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __count_vm_events(enum vm_event_item item, long delta) +{ + do { do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } 
while (0); switch(sizeof(vm_event_states.event[item])) { case 1: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(delta) && ((delta) == 1 || (delta) == -1)) ? (int)(delta) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (delta); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(delta))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(delta))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(delta) && ((delta) == 1 || (delta) == -1)) ? (int)(delta) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (delta); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(delta))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(delta))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(delta) && ((delta) == 1 || (delta) == -1)) ? 
(int)(delta) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (delta); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(delta))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(delta))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(delta) && ((delta) == 1 || (delta) == -1)) ? (int)(delta) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (delta); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(delta))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(delta))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void count_vm_events(enum vm_event_item item, long delta) +{ + do { do { const void *__vpp_verify = (typeof((&(vm_event_states.event[item])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(vm_event_states.event[item])) { case 1: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = 
(__builtin_constant_p(delta) && ((delta) == 1 || (delta) == -1)) ? (int)(delta) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (delta); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(delta))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(delta))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(delta) && ((delta) == 1 || (delta) == -1)) ? (int)(delta) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (delta); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(delta))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(delta))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(delta) && ((delta) == 1 || (delta) == -1)) ? 
(int)(delta) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (delta); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(delta))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(delta))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((vm_event_states.event[item])) pao_T__; const int pao_ID__ = (__builtin_constant_p(delta) && ((delta) == 1 || (delta) == -1)) ? (int)(delta) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (delta); (void)pao_tmp__; } switch (sizeof((vm_event_states.event[item]))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "qi" ((pao_T__)(delta))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "ri" ((pao_T__)(delta))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item]))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((vm_event_states.event[item])) : "re" ((pao_T__)(delta))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +extern void all_vm_events(unsigned long *); + +extern void vm_events_fold_cpu(int cpu); +# 139 "./include/linux/vmstat.h" +extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS]; +extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS]; 
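+/*
+ * The inline functions above are the preprocessor-expanded forms of the
+ * kernel's per-CPU counter primitives: count_vm_event()/__count_vm_events()
+ * bump an entry of the per-CPU vm_event_states.event[] array with a single
+ * %gs-relative inc/dec/add instruction chosen by operand size (the volatile
+ * asm variants correspond to the this_cpu_* operations, the non-volatile
+ * ones to raw_cpu_*). The atomic_long_t arrays declared here (vm_zone_stat,
+ * vm_numa_stat, vm_node_stat) are the matching machine-wide totals, which
+ * the zone/node_page_state_add() helpers below update in lockstep with the
+ * per-zone and per-node counters.
+ */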
+extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS]; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_numa_state_add(long x, struct zone *zone, + enum numa_stat_item item) +{ + atomic_long_add(x, &zone->vm_numa_stat[item]); + atomic_long_add(x, &vm_numa_stat[item]); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long global_numa_state(enum numa_stat_item item) +{ + long x = atomic_long_read(&vm_numa_stat[item]); + + return x; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long zone_numa_state_snapshot(struct zone *zone, + enum numa_stat_item item) +{ + long x = atomic_long_read(&zone->vm_numa_stat[item]); + int cpu; + + for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (((const struct cpumask *)&__cpu_online_mask))), ((cpu)) < nr_cpu_ids;) + x += ({ do { const void *__vpp_verify = (typeof((zone->pageset) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((zone->pageset))) *)((zone->pageset)))); (typeof((typeof(*((zone->pageset))) *)((zone->pageset)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->vm_numa_stat_diff[item]; + + return x; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zone_page_state_add(long x, struct zone *zone, + enum zone_stat_item item) +{ + atomic_long_add(x, &zone->vm_stat[item]); + atomic_long_add(x, &vm_zone_stat[item]); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void node_page_state_add(long x, struct pglist_data *pgdat, + enum node_stat_item item) +{ + atomic_long_add(x, &pgdat->vm_stat[item]); + atomic_long_add(x, &vm_node_stat[item]); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long global_zone_page_state(enum zone_stat_item item) +{ + long x = atomic_long_read(&vm_zone_stat[item]); + + if (x < 0) + x = 0; + + return x; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long global_node_page_state(enum node_stat_item item) +{ + long x = atomic_long_read(&vm_node_stat[item]); + + if (x < 0) + x = 0; + + return x; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long zone_page_state(struct zone *zone, + enum zone_stat_item item) +{ + long x = atomic_long_read(&zone->vm_stat[item]); + + if (x < 0) + x = 0; + + return x; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long zone_page_state_snapshot(struct zone *zone, + enum zone_stat_item item) +{ + long x = atomic_long_read(&zone->vm_stat[item]); + + + int cpu; + for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (((const struct cpumask *)&__cpu_online_mask))), ((cpu)) < nr_cpu_ids;) + x += ({ do { const void *__vpp_verify = (typeof((zone->pageset) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((zone->pageset))) *)((zone->pageset)))); (typeof((typeof(*((zone->pageset))) *)((zone->pageset)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->vm_stat_diff[item]; + + if (x < 0) + x = 0; + + return x; +} + + +extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item); +extern unsigned long sum_zone_node_page_state(int node, + enum zone_stat_item item); +extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item); +extern unsigned long node_page_state(struct pglist_data *pgdat, + enum node_stat_item item); + + + + + + +void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); +void __inc_zone_page_state(struct page *, enum zone_stat_item); +void __dec_zone_page_state(struct page *, enum zone_stat_item); + +void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long); +void __inc_node_page_state(struct page *, enum node_stat_item); +void __dec_node_page_state(struct page *, enum node_stat_item); + +void mod_zone_page_state(struct zone *, enum zone_stat_item, long); +void inc_zone_page_state(struct page *, enum zone_stat_item); +void dec_zone_page_state(struct page *, enum zone_stat_item); + +void mod_node_page_state(struct pglist_data *, enum node_stat_item, long); +void inc_node_page_state(struct page *, enum node_stat_item); +void dec_node_page_state(struct page *, enum node_stat_item); + +extern void inc_node_state(struct pglist_data *, enum node_stat_item); +extern void __inc_zone_state(struct zone *, enum zone_stat_item); +extern void __inc_node_state(struct pglist_data *, enum node_stat_item); +extern void dec_zone_state(struct zone *, enum zone_stat_item); +extern void __dec_zone_state(struct zone *, enum zone_stat_item); +extern void __dec_node_state(struct pglist_data *, enum node_stat_item); + +void quiet_vmstat(void); +void cpu_vm_stats_fold(int cpu); +void refresh_zone_stat_thresholds(void); + +struct ctl_table; +int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp, + loff_t *ppos); + +void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); + +int calculate_pressure_threshold(struct zone *zone); +int calculate_normal_threshold(struct zone *zone); +void set_pgdat_percpu_threshold(pg_data_t *pgdat, + int (*calculate_pressure)(struct zone *)); +# 382 "./include/linux/vmstat.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __mod_zone_freepage_state(struct zone *zone, int nr_pages, + int migratetype) +{ + __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); + if (__builtin_expect(!!((migratetype) == MIGRATE_CMA), 0)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); +} + +extern const char * const vmstat_text[]; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *zone_stat_name(enum zone_stat_item item) +{ + return vmstat_text[item]; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *numa_stat_name(enum numa_stat_item item) +{ + return vmstat_text[NR_VM_ZONE_STAT_ITEMS + + item]; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *node_stat_name(enum node_stat_item item) +{ + return vmstat_text[NR_VM_ZONE_STAT_ITEMS + + NR_VM_NUMA_STAT_ITEMS + + item]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *lru_list_name(enum lru_list lru) +{ + return node_stat_name(NR_LRU_BASE + lru) + 3; +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *writeback_stat_name(enum writeback_stat_item item) +{ + return vmstat_text[NR_VM_ZONE_STAT_ITEMS + + NR_VM_NUMA_STAT_ITEMS + + NR_VM_NODE_STAT_ITEMS + + item]; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *vm_event_name(enum vm_event_item item) +{ + return vmstat_text[NR_VM_ZONE_STAT_ITEMS + + NR_VM_NUMA_STAT_ITEMS + + NR_VM_NODE_STAT_ITEMS + + NR_VM_WRITEBACK_STAT_ITEMS + + item]; +} +# 1487 "./include/linux/mm.h" 2 + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void *lowmem_page_address(const struct page *page) +{ + return ((void *)((unsigned long)(((phys_addr_t)((unsigned long)((page) - ((struct page *)vmemmap_base))) << 12))+((unsigned long)page_offset_base))); +} +# 1521 "./include/linux/mm.h" +extern void *page_rmapping(struct page *page); +extern struct anon_vma *page_anon_vma(struct page *page); +extern struct address_space *page_mapping(struct page *page); + +extern struct address_space *__page_file_mapping(struct page *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct address_space *page_file_mapping(struct page *page) +{ + if (__builtin_expect(!!(PageSwapCache(page)), 0)) + return __page_file_mapping(page); + + return page->mapping; +} + +extern unsigned long __page_file_index(struct page *page); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long page_index(struct page *page) +{ + if (__builtin_expect(!!(PageSwapCache(page)), 0)) + return __page_file_index(page); + return page->index; +} + +bool page_mapped(struct page *page); +struct address_space *page_mapping(struct page *page); +struct address_space *page_mapping_file(struct page *page); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool page_is_pfmemalloc(struct page *page) +{ + + + + + return page->index == -1UL; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_page_pfmemalloc(struct page *page) +{ + page->index = -1UL; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_page_pfmemalloc(struct page *page) +{ + page->index = 0; +} + + + + +extern void pagefault_out_of_memory(void); +# 1594 "./include/linux/mm.h" +extern void show_free_areas(unsigned int flags, nodemask_t *nodemask); + + +extern bool can_do_mlock(void); + + + +extern int user_shm_lock(size_t, struct user_struct *); +extern void user_shm_unlock(size_t, struct user_struct *); + + + + +struct zap_details { + struct address_space *check_mapping; + unsigned long first_index; + unsigned long last_index; +}; + +struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte); +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t pmd); + +void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size); +void zap_page_range(struct vm_area_struct *vma, unsigned long address, + unsigned long size); +void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, + 
unsigned long start, unsigned long end); + +struct mmu_notifier_range; + +void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, + unsigned long end, unsigned long floor, unsigned long ceiling); +int copy_page_range(struct mm_struct *dst, struct mm_struct *src, + struct vm_area_struct *vma); +int follow_pte_pmd(struct mm_struct *mm, unsigned long address, + struct mmu_notifier_range *range, + pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); +int follow_pfn(struct vm_area_struct *vma, unsigned long address, + unsigned long *pfn); +int follow_phys(struct vm_area_struct *vma, unsigned long address, + unsigned int flags, unsigned long *prot, resource_size_t *phys); +int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); + +extern void truncate_pagecache(struct inode *inode, loff_t new); +extern void truncate_setsize(struct inode *inode, loff_t newsize); +void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); +void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); +int truncate_inode_page(struct address_space *mapping, struct page *page); +int generic_error_remove_page(struct address_space *mapping, struct page *page); +int invalidate_inode_page(struct page *page); + + +extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, + unsigned long address, unsigned int flags); +extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, + unsigned long address, unsigned int fault_flags, + bool *unlocked); +void unmap_mapping_pages(struct address_space *mapping, + unsigned long start, unsigned long nr, bool even_cows); +void unmap_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen, int even_cows); +# 1681 "./include/linux/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void unmap_shared_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen) +{ + unmap_mapping_range(mapping, holebegin, holelen, 0); +} + +extern int access_process_vm(struct task_struct *tsk, unsigned long addr, + void *buf, int len, unsigned int gup_flags); +extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, + void *buf, int len, unsigned int gup_flags); +extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, + unsigned long addr, void *buf, int len, unsigned int gup_flags); + +long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas, int *locked); +long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas, int *locked); +long get_user_pages(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas); +long pin_user_pages(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + struct vm_area_struct **vmas); +long get_user_pages_locked(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, int *locked); +long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, int *locked); +long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, + 
struct page **pages, unsigned int gup_flags); +long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, + struct page **pages, unsigned int gup_flags); + +int get_user_pages_fast(unsigned long start, int nr_pages, + unsigned int gup_flags, struct page **pages); +int pin_user_pages_fast(unsigned long start, int nr_pages, + unsigned int gup_flags, struct page **pages); + +int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc); +int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, + struct task_struct *task, bool bypass_rlim); + + +struct frame_vector { + unsigned int nr_allocated; + unsigned int nr_frames; + bool got_ref; + bool is_pfns; + void *ptrs[]; + + +}; + +struct frame_vector *frame_vector_create(unsigned int nr_frames); +void frame_vector_destroy(struct frame_vector *vec); +int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, + unsigned int gup_flags, struct frame_vector *vec); +void put_vaddr_frames(struct frame_vector *vec); +int frame_vector_to_pages(struct frame_vector *vec); +void frame_vector_to_pfns(struct frame_vector *vec); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int frame_vector_count(struct frame_vector *vec) +{ + return vec->nr_frames; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page **frame_vector_pages(struct frame_vector *vec) +{ + if (vec->is_pfns) { + int err = frame_vector_to_pages(vec); + + if (err) + return ERR_PTR(err); + } + return (struct page **)(vec->ptrs); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long *frame_vector_pfns(struct frame_vector *vec) +{ + if (!vec->is_pfns) + frame_vector_to_pfns(vec); + return (unsigned long *)(vec->ptrs); +} + +struct kvec; +int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, + struct page **pages); +int get_kernel_page(unsigned long start, int write, struct page **pages); +struct page *get_dump_page(unsigned long addr); + +extern int try_to_release_page(struct page * page, gfp_t gfp_mask); +extern void do_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); + +void __set_page_dirty(struct page *, struct address_space *, int warn); +int __set_page_dirty_nobuffers(struct page *page); +int __set_page_dirty_no_writeback(struct page *page); +int redirty_page_for_writepage(struct writeback_control *wbc, + struct page *page); +void account_page_dirtied(struct page *page, struct address_space *mapping); +void account_page_cleaned(struct page *page, struct address_space *mapping, + struct bdi_writeback *wb); +int set_page_dirty(struct page *page); +int set_page_dirty_lock(struct page *page); +void __cancel_dirty_page(struct page *page); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cancel_dirty_page(struct page *page) +{ + + if (PageDirty(page)) + __cancel_dirty_page(page); +} +int clear_page_dirty_for_io(struct page *page); + +int get_cmdline(struct task_struct *task, char *buffer, int buflen); + +extern unsigned long move_page_tables(struct vm_area_struct *vma, + unsigned long old_addr, struct vm_area_struct *new_vma, + unsigned long new_addr, unsigned long len, + bool need_rmap_locks); +# 1820 "./include/linux/mm.h" +extern unsigned long change_protection(struct vm_area_struct *vma, unsigned 
long start, + unsigned long end, pgprot_t newprot, + unsigned long cp_flags); +extern int mprotect_fixup(struct vm_area_struct *vma, + struct vm_area_struct **pprev, unsigned long start, + unsigned long end, unsigned long newflags); + + + + +int get_user_pages_fast_only(unsigned long start, int nr_pages, + unsigned int gup_flags, struct page **pages); +int pin_user_pages_fast_only(unsigned long start, int nr_pages, + unsigned int gup_flags, struct page **pages); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool get_user_page_fast_only(unsigned long addr, + unsigned int gup_flags, struct page **pagep) +{ + return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_mm_counter(struct mm_struct *mm, int member) +{ + long val = atomic_long_read(&mm->rss_stat.count[member]); + + + + + + + if (val < 0) + val = 0; + + return (unsigned long)val; +} + +void mm_trace_rss_stat(struct mm_struct *mm, int member, long count); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void add_mm_counter(struct mm_struct *mm, int member, long value) +{ + long count = atomic_long_add_return(value, &mm->rss_stat.count[member]); + + mm_trace_rss_stat(mm, member, count); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inc_mm_counter(struct mm_struct *mm, int member) +{ + long count = atomic_long_inc_return(&mm->rss_stat.count[member]); + + mm_trace_rss_stat(mm, member, count); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dec_mm_counter(struct mm_struct *mm, int member) +{ + long count = atomic_long_dec_return(&mm->rss_stat.count[member]); + + mm_trace_rss_stat(mm, member, count); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mm_counter_file(struct page *page) +{ + if (PageSwapBacked(page)) + return MM_SHMEMPAGES; + return MM_FILEPAGES; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mm_counter(struct page *page) +{ + if (PageAnon(page)) + return MM_ANONPAGES; + return mm_counter_file(page); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_mm_rss(struct mm_struct *mm) +{ + return get_mm_counter(mm, MM_FILEPAGES) + + get_mm_counter(mm, MM_ANONPAGES) + + get_mm_counter(mm, MM_SHMEMPAGES); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_mm_hiwater_rss(struct mm_struct *mm) +{ + return __builtin_choose_expr(((!!(sizeof((typeof(mm->hiwater_rss) *)1 == (typeof(get_mm_rss(mm)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(mm->hiwater_rss) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(get_mm_rss(mm)) * 0l)) : (int *)8))))), ((mm->hiwater_rss) > (get_mm_rss(mm)) ? (mm->hiwater_rss) : (get_mm_rss(mm))), ({ typeof(mm->hiwater_rss) __UNIQUE_ID___x953 = (mm->hiwater_rss); typeof(get_mm_rss(mm)) __UNIQUE_ID___y954 = (get_mm_rss(mm)); ((__UNIQUE_ID___x953) > (__UNIQUE_ID___y954) ? 
(__UNIQUE_ID___x953) : (__UNIQUE_ID___y954)); })); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_mm_hiwater_vm(struct mm_struct *mm) +{ + return __builtin_choose_expr(((!!(sizeof((typeof(mm->hiwater_vm) *)1 == (typeof(mm->total_vm) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(mm->hiwater_vm) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(mm->total_vm) * 0l)) : (int *)8))))), ((mm->hiwater_vm) > (mm->total_vm) ? (mm->hiwater_vm) : (mm->total_vm)), ({ typeof(mm->hiwater_vm) __UNIQUE_ID___x955 = (mm->hiwater_vm); typeof(mm->total_vm) __UNIQUE_ID___y956 = (mm->total_vm); ((__UNIQUE_ID___x955) > (__UNIQUE_ID___y956) ? (__UNIQUE_ID___x955) : (__UNIQUE_ID___y956)); })); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_hiwater_rss(struct mm_struct *mm) +{ + unsigned long _rss = get_mm_rss(mm); + + if ((mm)->hiwater_rss < _rss) + (mm)->hiwater_rss = _rss; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_hiwater_vm(struct mm_struct *mm) +{ + if (mm->hiwater_vm < mm->total_vm) + mm->hiwater_vm = mm->total_vm; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void reset_mm_hiwater_rss(struct mm_struct *mm) +{ + mm->hiwater_rss = get_mm_rss(mm); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void setmax_mm_hiwater_rss(unsigned long *maxrss, + struct mm_struct *mm) +{ + unsigned long hiwater_rss = get_mm_hiwater_rss(mm); + + if (*maxrss < hiwater_rss) + *maxrss = hiwater_rss; +} + + +void sync_mm_rss(struct mm_struct *mm); +# 1968 "./include/linux/mm.h" +int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); + +extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl) +{ + pte_t *ptep; + (ptep = __get_locked_pte(mm, addr, ptl)); + return ptep; +} +# 1987 "./include/linux/mm.h" +int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); +# 2000 "./include/linux/mm.h" +int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_inc_nr_puds(struct mm_struct *mm) +{ + if (0) + return; + atomic_long_add(512 * sizeof(pud_t), &mm->pgtables_bytes); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_dec_nr_puds(struct mm_struct *mm) +{ + if (0) + return; + atomic_long_sub(512 * sizeof(pud_t), &mm->pgtables_bytes); +} +# 2028 "./include/linux/mm.h" +int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_inc_nr_pmds(struct mm_struct *mm) +{ + if (0) + return; + atomic_long_add(512 * sizeof(pmd_t), &mm->pgtables_bytes); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_dec_nr_pmds(struct 
mm_struct *mm) +{ + if (0) + return; + atomic_long_sub(512 * sizeof(pmd_t), &mm->pgtables_bytes); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_pgtables_bytes_init(struct mm_struct *mm) +{ + atomic_long_set(&mm->pgtables_bytes, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long mm_pgtables_bytes(const struct mm_struct *mm) +{ + return atomic_long_read(&mm->pgtables_bytes); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_inc_nr_ptes(struct mm_struct *mm) +{ + atomic_long_add(512 * sizeof(pte_t), &mm->pgtables_bytes); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_dec_nr_ptes(struct mm_struct *mm) +{ + atomic_long_sub(512 * sizeof(pte_t), &mm->pgtables_bytes); +} +# 2077 "./include/linux/mm.h" +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); +int __pte_alloc_kernel(pmd_t *pmd); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, + unsigned long address) +{ + return (__builtin_expect(!!(pgd_none(*pgd)), 0) && __p4d_alloc(mm, pgd, address)) ? + ((void *)0) : p4d_offset(pgd, address); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, + unsigned long address) +{ + return (__builtin_expect(!!(p4d_none(*p4d)), 0) && __pud_alloc(mm, p4d, address)) ? + ((void *)0) : pud_offset(p4d, address); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd, + unsigned long address, + pgtbl_mod_mask *mod_mask) + +{ + if (__builtin_expect(!!(pgd_none(*pgd)), 0)) { + if (__p4d_alloc(mm, pgd, address)) + return ((void *)0); + *mod_mask |= ((((1UL))) << (0)); + } + + return p4d_offset(pgd, address); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d, + unsigned long address, + pgtbl_mod_mask *mod_mask) +{ + if (__builtin_expect(!!(p4d_none(*p4d)), 0)) { + if (__pud_alloc(mm, p4d, address)) + return ((void *)0); + *mod_mask |= ((((1UL))) << (1)); + } + + return pud_offset(p4d, address); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) +{ + return (__builtin_expect(!!(pud_none(*pud)), 0) && __pmd_alloc(mm, pud, address))? 
+ ((void *)0): pmd_offset(pud, address); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud, + unsigned long address, + pgtbl_mod_mask *mod_mask) +{ + if (__builtin_expect(!!(pud_none(*pud)), 0)) { + if (__pmd_alloc(mm, pud, address)) + return ((void *)0); + *mod_mask |= ((((1UL))) << (2)); + } + + return pmd_offset(pud, address); +} + + + + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) ptlock_cache_init(void); +extern bool ptlock_alloc(struct page *page); +extern void ptlock_free(struct page *page); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *ptlock_ptr(struct page *page) +{ + return page->ptl; +} +# 2173 "./include/linux/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return ptlock_ptr((((struct page *)vmemmap_base) + (pmd_pfn(*pmd)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ptlock_init(struct page *page) +{ + + + + + + + + do { if (__builtin_expect(!!(*(unsigned long *)&page->ptl), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "*(unsigned long *)&page->ptl"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (957)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/mm.h"), "i" (2187), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (958)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + if (!ptlock_alloc(page)) + return false; + do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(ptlock_ptr(page)), "ptlock_ptr(page)", &__key, LD_WAIT_CONFIG); } while (0); + return true; +} +# 2207 "./include/linux/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pgtable_init(void) +{ + ptlock_cache_init(); + pgtable_cache_init(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pgtable_pte_page_ctor(struct page *page) +{ + if (!ptlock_init(page)) + return false; + __SetPageTable(page); + inc_zone_page_state(page, NR_PAGETABLE); + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pgtable_pte_page_dtor(struct page *page) +{ + ptlock_free(page); + __ClearPageTable(page); + dec_zone_page_state(page, NR_PAGETABLE); +} +# 2263 "./include/linux/mm.h" +static struct page *pmd_to_page(pmd_t *pmd) +{ + unsigned long mask = ~(512 * sizeof(pmd_t) - 1); + return (((struct page *)vmemmap_base) + (__phys_addr((unsigned long)((void *)((unsigned long) pmd & mask))) >> 12)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return ptlock_ptr(pmd_to_page(pmd)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pgtable_pmd_page_ctor(struct page *page) +{ + + page->pmd_huge_pte = ((void *)0); + + return ptlock_init(page); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pgtable_pmd_page_dtor(struct page *page) +{ + + do { if (__builtin_expect(!!(page->pmd_huge_pte), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "page->pmd_huge_pte"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (959)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/mm.h"), "i" (2285), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (960)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + + ptlock_free(page); +} +# 2306 "./include/linux/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) +{ + spinlock_t *ptl = pmd_lockptr(mm, pmd); + spin_lock(ptl); + return ptl; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) +{ + return &mm->page_table_lock; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) +{ + spinlock_t *ptl = pud_lockptr(mm, pud); + + spin_lock(ptl); + return ptl; +} + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pagecache_init(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) free_area_init_memoryless_node(int nid); +extern void free_initmem(void); + + + + + + + +extern unsigned long free_reserved_area(void *start, void *end, + int poison, const char *s); +# 2353 "./include/linux/mm.h" +extern void adjust_managed_page_count(struct page *page, long count); +extern void mem_init_print_info(const char *str); + +extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __free_reserved_page(struct page *page) +{ + ClearPageReserved(page); + init_page_count(page); + __free_pages((page), 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void free_reserved_page(struct page *page) +{ + __free_reserved_page(page); + adjust_managed_page_count(page, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mark_page_reserved(struct page *page) +{ + 
SetPageReserved(page); + adjust_managed_page_count(page, -1); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long free_initmem_default(int poison) +{ + extern char __init_begin[], __init_end[]; + + return free_reserved_area(&__init_begin, &__init_end, + poison, "unused kernel"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_num_physpages(void) +{ + int nid; + unsigned long phys_pages = 0; + + for (((nid)) = __first_node(&(node_states[N_ONLINE])); ((nid)) < (1 << 10); ((nid)) = __next_node((((nid))), &((node_states[N_ONLINE])))) + phys_pages += ((node_data[nid])->node_present_pages); + + return phys_pages; +} +# 2422 "./include/linux/mm.h" +void free_area_init(unsigned long *max_zone_pfn); +unsigned long node_map_pfn_alignment(void); +unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, + unsigned long end_pfn); +extern unsigned long absent_pages_in_range(unsigned long start_pfn, + unsigned long end_pfn); +extern void get_pfn_range_for_nid(unsigned int nid, + unsigned long *start_pfn, unsigned long *end_pfn); +extern unsigned long find_min_pfn_with_active_regions(void); +extern void sparse_memory_present_with_active_regions(int nid); +# 2440 "./include/linux/mm.h" +extern int __attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) early_pfn_to_nid(unsigned long pfn); + +extern int __attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state); + + +extern void set_dma_reserve(unsigned long new_dma_reserve); +extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, + enum memmap_context, struct vmem_altmap *); +extern void setup_per_zone_wmarks(void); +extern int __attribute__((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) init_per_zone_wmark_min(void); +extern void mem_init(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) mmap_init(void); +extern void show_mem(unsigned int flags, nodemask_t *nodemask); +extern long si_mem_available(void); +extern void si_meminfo(struct sysinfo * val); +extern void si_meminfo_node(struct sysinfo *val, int nid); + + + + +extern __attribute__((__format__(printf, 3, 4))) +void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); + +extern void setup_per_cpu_pageset(void); + + +extern int min_free_kbytes; +extern int watermark_boost_factor; +extern int watermark_scale_factor; +extern bool arch_has_descending_max_zone_pfns(void); + + +extern atomic_long_t mmap_pages_allocated; +extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); + + +void vma_interval_tree_insert(struct vm_area_struct *node, + struct rb_root_cached *root); +void vma_interval_tree_insert_after(struct vm_area_struct *node, + struct vm_area_struct *prev, + struct rb_root_cached *root); +void vma_interval_tree_remove(struct vm_area_struct *node, + struct rb_root_cached *root); +struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, + unsigned long start, unsigned long last); +struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, + unsigned long start, unsigned long last); + + + 
+ + +void anon_vma_interval_tree_insert(struct anon_vma_chain *node, + struct rb_root_cached *root); +void anon_vma_interval_tree_remove(struct anon_vma_chain *node, + struct rb_root_cached *root); +struct anon_vma_chain * +anon_vma_interval_tree_iter_first(struct rb_root_cached *root, + unsigned long start, unsigned long last); +struct anon_vma_chain *anon_vma_interval_tree_iter_next( + struct anon_vma_chain *node, unsigned long start, unsigned long last); + +void anon_vma_interval_tree_verify(struct anon_vma_chain *node); + + + + + + + +extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); +extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long pgoff, struct vm_area_struct *insert, + struct vm_area_struct *expand); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vma_adjust(struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned long pgoff, struct vm_area_struct *insert) +{ + return __vma_adjust(vma, start, end, pgoff, insert, ((void *)0)); +} +extern struct vm_area_struct *vma_merge(struct mm_struct *, + struct vm_area_struct *prev, unsigned long addr, unsigned long end, + unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long, + struct mempolicy *, struct vm_userfaultfd_ctx); +extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); +extern int __split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); +extern int split_vma(struct mm_struct *, struct vm_area_struct *, + unsigned long addr, int new_below); +extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); +extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, + struct rb_node **, struct rb_node *); +extern void unlink_file_vma(struct vm_area_struct *); +extern struct vm_area_struct *copy_vma(struct vm_area_struct **, + unsigned long addr, unsigned long len, unsigned long pgoff, + bool *need_rmap_locks); +extern void exit_mmap(struct mm_struct *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int check_data_rlimit(unsigned long rlim, + unsigned long new, + unsigned long start, + unsigned long end_data, + unsigned long start_data) +{ + if (rlim < (~0UL)) { + if (((new - start) + (end_data - start_data)) > rlim) + return -28; + } + + return 0; +} + +extern int mm_take_all_locks(struct mm_struct *mm); +extern void mm_drop_all_locks(struct mm_struct *mm); + +extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern struct file *get_mm_exe_file(struct mm_struct *mm); +extern struct file *get_task_exe_file(struct task_struct *task); + +extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); +extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); + +extern bool vma_is_special_mapping(const struct vm_area_struct *vma, + const struct vm_special_mapping *sm); +extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long flags, + const struct vm_special_mapping *spec); + +extern int install_special_mapping(struct mm_struct *mm, + unsigned long addr, unsigned long len, + unsigned long flags, struct page **pages); + +unsigned long randomize_stack_top(unsigned long stack_top); + +extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned 
long, unsigned long, unsigned long); + +extern unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf); +extern unsigned long do_mmap(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, unsigned long flags, + vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, + struct list_head *uf); +extern int __do_munmap(struct mm_struct *, unsigned long, size_t, + struct list_head *uf, bool downgrade); +extern int do_munmap(struct mm_struct *, unsigned long, size_t, + struct list_head *uf); +extern int do_madvise(unsigned long start, size_t len_in, int behavior); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +do_mmap_pgoff(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, unsigned long flags, + unsigned long pgoff, unsigned long *populate, + struct list_head *uf) +{ + return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); +} + + +extern int __mm_populate(unsigned long addr, unsigned long len, + int ignore_errors); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mm_populate(unsigned long addr, unsigned long len) +{ + + (void) __mm_populate(addr, len, 1); +} + + + + + +extern int __attribute__((__warn_unused_result__)) vm_brk(unsigned long, unsigned long); +extern int __attribute__((__warn_unused_result__)) vm_brk_flags(unsigned long, unsigned long, unsigned long); +extern int vm_munmap(unsigned long, size_t); +extern unsigned long __attribute__((__warn_unused_result__)) vm_mmap(struct file *, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); + +struct vm_unmapped_area_info { + + unsigned long flags; + unsigned long length; + unsigned long low_limit; + unsigned long high_limit; + unsigned long align_mask; + unsigned long align_offset; +}; + +extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info); + + +extern void truncate_inode_pages(struct address_space *, loff_t); +extern void truncate_inode_pages_range(struct address_space *, + loff_t lstart, loff_t lend); +extern void truncate_inode_pages_final(struct address_space *); + + +extern vm_fault_t filemap_fault(struct vm_fault *vmf); +extern void filemap_map_pages(struct vm_fault *vmf, + unsigned long start_pgoff, unsigned long end_pgoff); +extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); + + +int __attribute__((__warn_unused_result__)) write_one_page(struct page *page); +void task_dirty_inc(struct task_struct *tsk); + +extern unsigned long stack_guard_gap; + +extern int expand_stack(struct vm_area_struct *vma, unsigned long address); + + +extern int expand_downwards(struct vm_area_struct *vma, + unsigned long address); + + + + + + + +extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); +extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, + struct vm_area_struct **pprev); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +{ + struct vm_area_struct * vma = find_vma(mm,start_addr); + + if (vma && end_addr <= vma->vm_start) + vma = ((void *)0); + return vma; +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long vm_start = vma->vm_start; + + if (vma->vm_flags & 0x00000100) { + vm_start -= stack_guard_gap; + if (vm_start > vma->vm_start) + vm_start = 0; + } + return vm_start; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & 0x00000000) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -((1UL) << 12); + } + return vm_end; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long vma_pages(struct vm_area_struct *vma) +{ + return (vma->vm_end - vma->vm_start) >> 12; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct vm_area_struct *find_exact_vma(struct mm_struct *mm, + unsigned long vm_start, unsigned long vm_end) +{ + struct vm_area_struct *vma = find_vma(mm, vm_start); + + if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) + vma = ((void *)0); + + return vma; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool range_in_vma(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + return (vma && vma->vm_start <= start && end <= vma->vm_end); +} + + +pgprot_t vm_get_page_prot(unsigned long vm_flags); +void vma_set_page_prot(struct vm_area_struct *vma); +# 2738 "./include/linux/mm.h" +unsigned long change_prot_numa(struct vm_area_struct *vma, + unsigned long start, unsigned long end); + + +struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); +int remap_pfn_range(struct vm_area_struct *, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t); +int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); +int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num); +int vm_map_pages(struct vm_area_struct *vma, struct page **pages, + unsigned long num); +int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, + unsigned long num); +vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn); +vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, pgprot_t pgprot); +vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + pfn_t pfn); +vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, + pfn_t pfn, pgprot_t pgprot); +vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, + unsigned long addr, pfn_t pfn); +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) vm_fault_t vmf_insert_page(struct vm_area_struct *vma, + unsigned long addr, struct page *page) +{ + int err = vm_insert_page(vma, addr, page); + + if (err == -12) + return VM_FAULT_OOM; + if (err < 0 && err != -16) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) vm_fault_t vmf_error(int err) +{ + if (err == -12) + return 
VM_FAULT_OOM; + return VM_FAULT_SIGBUS; +} + +struct page *follow_page(struct vm_area_struct *vma, unsigned long address, + unsigned int foll_flags); +# 2865 "./include/linux/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) +{ + if (vm_fault & VM_FAULT_OOM) + return -12; + if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) + return (foll_flags & 0x100) ? -133 : -14; + if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) + return -14; + return 0; +} + +typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); +extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, + unsigned long size, pte_fn_t fn, void *data); +extern int apply_to_existing_page_range(struct mm_struct *mm, + unsigned long address, unsigned long size, + pte_fn_t fn, void *data); + + +extern bool page_poisoning_enabled(void); +extern void kernel_poison_pages(struct page *page, int numpages, int enable); + + + + + + + +extern struct static_key_true init_on_alloc; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool want_init_on_alloc(gfp_t flags) +{ + if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&init_on_alloc), struct static_key_true)) branch = arch_static_branch_jump(&(&init_on_alloc)->key, false); else if (__builtin_types_compatible_p(typeof(*&init_on_alloc), struct static_key_false)) branch = arch_static_branch(&(&init_on_alloc)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }) && + !page_poisoning_enabled()) + return true; + return flags & (( gfp_t)0x100u); +} + + +extern struct static_key_true init_on_free; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool want_init_on_free(void) +{ + return ({ bool branch; if (__builtin_types_compatible_p(typeof(*&init_on_free), struct static_key_true)) branch = arch_static_branch_jump(&(&init_on_free)->key, false); else if (__builtin_types_compatible_p(typeof(*&init_on_free), struct static_key_false)) branch = arch_static_branch(&(&init_on_free)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }) && + !page_poisoning_enabled(); +} + + +extern void init_debug_pagealloc(void); + + + +extern bool _debug_pagealloc_enabled_early; +extern struct static_key_false _debug_pagealloc_enabled; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool debug_pagealloc_enabled(void) +{ + return 1 && + _debug_pagealloc_enabled_early; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool debug_pagealloc_enabled_static(void) +{ + if (!1) + return false; + + return ({ bool branch; if (__builtin_types_compatible_p(typeof(*&_debug_pagealloc_enabled), struct static_key_true)) branch = arch_static_branch_jump(&(&_debug_pagealloc_enabled)->key, false); else if (__builtin_types_compatible_p(typeof(*&_debug_pagealloc_enabled), struct static_key_false)) branch = arch_static_branch(&(&_debug_pagealloc_enabled)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }); +} + + +extern void __kernel_map_pages(struct page *page, int numpages, int enable); + + + + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void +kernel_map_pages(struct page *page, int numpages, int enable) +{ + __kernel_map_pages(page, numpages, enable); +} + +extern bool kernel_page_present(struct page *page); +# 2966 "./include/linux/mm.h" +extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); +extern int in_gate_area_no_mm(unsigned long addr); +extern int in_gate_area(struct mm_struct *mm, unsigned long addr); +# 2981 "./include/linux/mm.h" +extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); + + +extern int sysctl_drop_caches; +int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); + + +void drop_slab(void); +void drop_slab_node(int nid); + + + + +extern int randomize_va_space; + + +const char * arch_vma_name(struct vm_area_struct *vma); + +void print_vma_addr(char *prefix, unsigned long rip); + + + + + + +void *sparse_buffer_alloc(unsigned long size); +struct page * __populate_section_memmap(unsigned long pfn, + unsigned long nr_pages, int nid, struct vmem_altmap *altmap); +pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); +p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); +pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); +pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); +pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); +void *vmemmap_alloc_block(unsigned long size, int node); +struct vmem_altmap; +void *vmemmap_alloc_block_buf(unsigned long size, int node); +void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); +void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); +int vmemmap_populate_basepages(unsigned long start, unsigned long end, + int node); +int vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap); +void vmemmap_populate_print_last(void); + +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap); + +void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, + unsigned long nr_pages); + +enum mf_flags { + MF_COUNT_INCREASED = 1 << 0, + MF_ACTION_REQUIRED = 1 << 1, + MF_MUST_KILL = 1 << 2, + MF_SOFT_OFFLINE = 1 << 3, +}; +extern int memory_failure(unsigned long pfn, int flags); +extern void memory_failure_queue(unsigned long pfn, int flags); +extern void memory_failure_queue_kick(int cpu); +extern int unpoison_memory(unsigned long pfn); +extern int get_hwpoison_page(struct page *page); + +extern int sysctl_memory_failure_early_kill; +extern int sysctl_memory_failure_recovery; +extern void shake_page(struct page *p, int access); +extern atomic_long_t num_poisoned_pages __attribute__((__section__(".data..read_mostly"))); +extern int soft_offline_page(unsigned long pfn, int flags); + + + + + +enum mf_result { + MF_IGNORED, + MF_FAILED, + MF_DELAYED, + MF_RECOVERED, +}; + +enum mf_action_page_type { + MF_MSG_KERNEL, + MF_MSG_KERNEL_HIGH_ORDER, + MF_MSG_SLAB, + MF_MSG_DIFFERENT_COMPOUND, + MF_MSG_POISONED_HUGE, + MF_MSG_HUGE, + MF_MSG_FREE_HUGE, + MF_MSG_NON_PMD_HUGE, + MF_MSG_UNMAP_FAILED, + MF_MSG_DIRTY_SWAPCACHE, + MF_MSG_CLEAN_SWAPCACHE, + MF_MSG_DIRTY_MLOCKED_LRU, + MF_MSG_CLEAN_MLOCKED_LRU, + MF_MSG_DIRTY_UNEVICTABLE_LRU, + MF_MSG_CLEAN_UNEVICTABLE_LRU, + MF_MSG_DIRTY_LRU, + MF_MSG_CLEAN_LRU, + MF_MSG_TRUNCATED_LRU, + MF_MSG_BUDDY, + MF_MSG_BUDDY_2ND, + MF_MSG_DAX, + MF_MSG_UNKNOWN, +}; + + +extern void clear_huge_page(struct page *page, + unsigned long 
addr_hint, + unsigned int pages_per_huge_page); +extern void copy_user_huge_page(struct page *dst, struct page *src, + unsigned long addr_hint, + struct vm_area_struct *vma, + unsigned int pages_per_huge_page); +extern long copy_huge_page_from_user(struct page *dst_page, + const void *usr_src, + unsigned int pages_per_huge_page, + bool allow_pagefault); +# 3109 "./include/linux/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool vma_is_special_huge(const struct vm_area_struct *vma) +{ + return vma_is_dax(vma) || (vma->vm_file && + (vma->vm_flags & (0x00000400 | 0x10000000))); +} + + + + +extern unsigned int _debug_guardpage_minorder; +extern struct static_key_false _debug_guardpage_enabled; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int debug_guardpage_minorder(void) +{ + return _debug_guardpage_minorder; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool debug_guardpage_enabled(void) +{ + return ({ bool branch; if (__builtin_types_compatible_p(typeof(*&_debug_guardpage_enabled), struct static_key_true)) branch = arch_static_branch_jump(&(&_debug_guardpage_enabled)->key, false); else if (__builtin_types_compatible_p(typeof(*&_debug_guardpage_enabled), struct static_key_false)) branch = arch_static_branch(&(&_debug_guardpage_enabled)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool page_is_guard(struct page *page) +{ + if (!debug_guardpage_enabled()) + return false; + + return PageGuard(page); +} + + + + + + + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) setup_nr_node_ids(void); + + + + +extern int memcmp_pages(struct page *page1, struct page *page2); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pages_identical(struct page *page1, struct page *page2) +{ + return !memcmp_pages(page1, page2); +} + + +unsigned long clean_record_shared_mapping_range(struct address_space *mapping, + unsigned long first_index, unsigned long nr, + unsigned long bitmap_pgoff, + unsigned long *bitmap, + unsigned long *start, + unsigned long *end); + +unsigned long wp_shared_mapping_range(struct address_space *mapping, + unsigned long first_index, unsigned long nr); + + +extern int sysctl_nr_trim_pages; +# 6 "./include/linux/ring_buffer.h" 2 +# 1 "./include/linux/seq_file.h" 1 +# 14 "./include/linux/seq_file.h" +struct seq_operations; + +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + loff_t index; + loff_t read_pos; + struct mutex lock; + const struct seq_operations *op; + int poll_event; + const struct file *file; + void *private; +}; + +struct seq_operations { + void * (*start) (struct seq_file *m, loff_t *pos); + void (*stop) (struct seq_file *m, void *v); + void * (*next) (struct seq_file *m, void *v, loff_t *pos); + int (*show) (struct seq_file *m, void *v); +}; +# 50 "./include/linux/seq_file.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool seq_has_overflowed(struct seq_file *m) +{ + return m->count == m->size; +} +# 63 "./include/linux/seq_file.h" 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) size_t seq_get_buf(struct seq_file *m, char **bufp) +{ + do { if (__builtin_expect(!!(m->count > m->size), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (961)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/seq_file.h"), "i" (65), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (962)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (m->count < m->size) + *bufp = m->buf + m->count; + else + *bufp = ((void *)0); + + return m->size - m->count; +} +# 83 "./include/linux/seq_file.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void seq_commit(struct seq_file *m, int num) +{ + if (num < 0) { + m->count = m->size; + } else { + do { if (__builtin_expect(!!(m->count + num > m->size), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (963)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/seq_file.h"), "i" (88), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (964)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + m->count += num; + } +} +# 101 "./include/linux/seq_file.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void seq_setwidth(struct seq_file *m, size_t size) +{ + m->pad_until = m->count + size; +} +void seq_pad(struct seq_file *m, char c); + +char *mangle_path(char *s, const char *p, const char *esc); +int seq_open(struct file *, const struct seq_operations *); +ssize_t seq_read(struct file *, char *, size_t, loff_t *); +loff_t seq_lseek(struct file *, loff_t, int); +int seq_release(struct inode *, struct file *); +int seq_write(struct seq_file *seq, const void *data, size_t len); + +__attribute__((__format__(printf, 2, 0))) +void seq_vprintf(struct seq_file *m, const char *fmt, va_list args); +__attribute__((__format__(printf, 2, 3))) +void seq_printf(struct seq_file *m, const char *fmt, ...); +void seq_putc(struct seq_file *m, char c); +void seq_puts(struct seq_file *m, const char *s); +void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, + unsigned long long num, unsigned int width); +void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, + unsigned long long num); +void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num); +void seq_put_hex_ll(struct seq_file *m, const char *delimiter, + unsigned long long v, unsigned int width); + +void 
seq_escape(struct seq_file *m, const char *s, const char *esc); +void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz); + +void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, + int rowsize, int groupsize, const void *buf, size_t len, + bool ascii); + +int seq_path(struct seq_file *, const struct path *, const char *); +int seq_file_path(struct seq_file *, struct file *, const char *); +int seq_dentry(struct seq_file *, struct dentry *, const char *); +int seq_path_root(struct seq_file *m, const struct path *path, + const struct path *root, const char *esc); + +int single_open(struct file *, int (*)(struct seq_file *, void *), void *); +int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); +int single_release(struct inode *, struct file *); +void *__seq_open_private(struct file *, const struct seq_operations *, int); +int seq_open_private(struct file *, const struct seq_operations *, int); +int seq_release_private(struct inode *, struct file *); +# 194 "./include/linux/seq_file.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct user_namespace *seq_user_ns(struct seq_file *seq) +{ + + return seq->file->f_cred->user_ns; + + + + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void seq_show_option(struct seq_file *m, const char *name, + const char *value) +{ + seq_putc(m, ','); + seq_escape(m, name, ",= \t\n\\"); + if (value) { + seq_putc(m, '='); + seq_escape(m, value, ", \t\n\\"); + } +} +# 244 "./include/linux/seq_file.h" +extern struct list_head *seq_list_start(struct list_head *head, + loff_t pos); +extern struct list_head *seq_list_start_head(struct list_head *head, + loff_t pos); +extern struct list_head *seq_list_next(void *v, struct list_head *head, + loff_t *ppos); + + + + + +extern struct hlist_node *seq_hlist_start(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, + loff_t *ppos); + +extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, + loff_t pos); +extern struct hlist_node *seq_hlist_next_rcu(void *v, + struct hlist_head *head, + loff_t *ppos); + + +extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head *head, int *cpu, loff_t pos); + +extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head *head, int *cpu, loff_t *pos); + +void seq_file_init(void); +# 7 "./include/linux/ring_buffer.h" 2 +# 1 "./include/linux/poll.h" 1 +# 13 "./include/linux/poll.h" +# 1 "./include/uapi/linux/poll.h" 1 +# 1 "./arch/x86/include/generated/uapi/asm/poll.h" 1 +# 1 "./include/uapi/asm-generic/poll.h" 1 +# 36 "./include/uapi/asm-generic/poll.h" +struct pollfd { + int fd; + short events; + short revents; +}; +# 1 "./arch/x86/include/generated/uapi/asm/poll.h" 2 +# 1 "./include/uapi/linux/poll.h" 2 +# 14 "./include/linux/poll.h" 2 +# 1 "./include/uapi/linux/eventpoll.h" 1 +# 77 "./include/uapi/linux/eventpoll.h" +struct epoll_event { + __poll_t events; + __u64 data; +} __attribute__((packed)); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ep_take_care_of_epollwakeup(struct epoll_event *epev) +{ + if 
((epev->events & (( __poll_t)(1U << 29))) && !capable(36)) + epev->events &= ~(( __poll_t)(1U << 29)); +} +# 15 "./include/linux/poll.h" 2 + +extern struct ctl_table epoll_table[]; +# 32 "./include/linux/poll.h" +struct poll_table_struct; + + + + +typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); + + + + + +typedef struct poll_table_struct { + poll_queue_proc _qproc; + __poll_t _key; +} poll_table; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) +{ + if (p && p->_qproc && wait_address) + p->_qproc(filp, wait_address, p); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool poll_does_not_wait(const poll_table *p) +{ + return p == ((void *)0) || p->_qproc == ((void *)0); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __poll_t poll_requested_events(const poll_table *p) +{ + return p ? p->_key : ~(__poll_t)0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) +{ + pt->_qproc = qproc; + pt->_key = ~(__poll_t)0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool file_can_poll(struct file *file) +{ + return file->f_op->poll; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt) +{ + if (__builtin_expect(!!(!file->f_op->poll), 0)) + return (( __poll_t)0x00000001 | ( __poll_t)0x00000004 | ( __poll_t)0x00000040 | ( __poll_t)0x00000100); + return file->f_op->poll(file, pt); +} + +struct poll_table_entry { + struct file *filp; + __poll_t key; + wait_queue_entry_t wait; + wait_queue_head_t *wait_address; +}; + + + + +struct poll_wqueues { + poll_table pt; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; + int error; + int inline_index; + struct poll_table_entry inline_entries[((832 - 256) / sizeof(struct poll_table_entry))]; +}; + +extern void poll_initwait(struct poll_wqueues *pwq); +extern void poll_freewait(struct poll_wqueues *pwq); +extern u64 select_estimate_accuracy(struct timespec64 *tv); + + + +extern int core_sys_select(int n, fd_set *inp, fd_set *outp, + fd_set *exp, struct timespec64 *end_time); + +extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec, + long nsec); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u16 mangle_poll(__poll_t val) +{ + __u16 v = ( __u16)val; + + return (( __u16)( __poll_t)0x00000001 < 0x0001 ? (v & ( __u16)( __poll_t)0x00000001) * (0x0001/( __u16)( __poll_t)0x00000001) : (v & ( __u16)( __poll_t)0x00000001) / (( __u16)( __poll_t)0x00000001/0x0001)) | (( __u16)( __poll_t)0x00000004 < 0x0004 ? (v & ( __u16)( __poll_t)0x00000004) * (0x0004/( __u16)( __poll_t)0x00000004) : (v & ( __u16)( __poll_t)0x00000004) / (( __u16)( __poll_t)0x00000004/0x0004)) | (( __u16)( __poll_t)0x00000002 < 0x0002 ? 
(v & ( __u16)( __poll_t)0x00000002) * (0x0002/( __u16)( __poll_t)0x00000002) : (v & ( __u16)( __poll_t)0x00000002) / (( __u16)( __poll_t)0x00000002/0x0002)) | (( __u16)( __poll_t)0x00000008 < 0x0008 ? (v & ( __u16)( __poll_t)0x00000008) * (0x0008/( __u16)( __poll_t)0x00000008) : (v & ( __u16)( __poll_t)0x00000008) / (( __u16)( __poll_t)0x00000008/0x0008)) | (( __u16)( __poll_t)0x00000020 < 0x0020 ? (v & ( __u16)( __poll_t)0x00000020) * (0x0020/( __u16)( __poll_t)0x00000020) : (v & ( __u16)( __poll_t)0x00000020) / (( __u16)( __poll_t)0x00000020/0x0020)) | + (( __u16)( __poll_t)0x00000040 < 0x0040 ? (v & ( __u16)( __poll_t)0x00000040) * (0x0040/( __u16)( __poll_t)0x00000040) : (v & ( __u16)( __poll_t)0x00000040) / (( __u16)( __poll_t)0x00000040/0x0040)) | (( __u16)( __poll_t)0x00000080 < 0x0080 ? (v & ( __u16)( __poll_t)0x00000080) * (0x0080/( __u16)( __poll_t)0x00000080) : (v & ( __u16)( __poll_t)0x00000080) / (( __u16)( __poll_t)0x00000080/0x0080)) | (( __u16)( __poll_t)0x00000100 < 0x0100 ? (v & ( __u16)( __poll_t)0x00000100) * (0x0100/( __u16)( __poll_t)0x00000100) : (v & ( __u16)( __poll_t)0x00000100) / (( __u16)( __poll_t)0x00000100/0x0100)) | (( __u16)( __poll_t)0x00000200 < 0x0200 ? (v & ( __u16)( __poll_t)0x00000200) * (0x0200/( __u16)( __poll_t)0x00000200) : (v & ( __u16)( __poll_t)0x00000200) / (( __u16)( __poll_t)0x00000200/0x0200)) | + (( __u16)( __poll_t)0x00000010 < 0x0010 ? (v & ( __u16)( __poll_t)0x00000010) * (0x0010/( __u16)( __poll_t)0x00000010) : (v & ( __u16)( __poll_t)0x00000010) / (( __u16)( __poll_t)0x00000010/0x0010)) | (( __u16)( __poll_t)0x00002000 < 0x2000 ? (v & ( __u16)( __poll_t)0x00002000) * (0x2000/( __u16)( __poll_t)0x00002000) : (v & ( __u16)( __poll_t)0x00002000) / (( __u16)( __poll_t)0x00002000/0x2000)) | (( __u16)( __poll_t)0x00000400 < 0x0400 ? (v & ( __u16)( __poll_t)0x00000400) * (0x0400/( __u16)( __poll_t)0x00000400) : (v & ( __u16)( __poll_t)0x00000400) / (( __u16)( __poll_t)0x00000400/0x0400)); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __poll_t demangle_poll(u16 val) +{ + + return ( __poll_t)(0x0001 < ( __u16)( __poll_t)0x00000001 ? (val & 0x0001) * (( __u16)( __poll_t)0x00000001/0x0001) : (val & 0x0001) / (0x0001/( __u16)( __poll_t)0x00000001)) | ( __poll_t)(0x0004 < ( __u16)( __poll_t)0x00000004 ? (val & 0x0004) * (( __u16)( __poll_t)0x00000004/0x0004) : (val & 0x0004) / (0x0004/( __u16)( __poll_t)0x00000004)) | ( __poll_t)(0x0002 < ( __u16)( __poll_t)0x00000002 ? (val & 0x0002) * (( __u16)( __poll_t)0x00000002/0x0002) : (val & 0x0002) / (0x0002/( __u16)( __poll_t)0x00000002)) | ( __poll_t)(0x0008 < ( __u16)( __poll_t)0x00000008 ? (val & 0x0008) * (( __u16)( __poll_t)0x00000008/0x0008) : (val & 0x0008) / (0x0008/( __u16)( __poll_t)0x00000008)) | ( __poll_t)(0x0020 < ( __u16)( __poll_t)0x00000020 ? (val & 0x0020) * (( __u16)( __poll_t)0x00000020/0x0020) : (val & 0x0020) / (0x0020/( __u16)( __poll_t)0x00000020)) | + ( __poll_t)(0x0040 < ( __u16)( __poll_t)0x00000040 ? (val & 0x0040) * (( __u16)( __poll_t)0x00000040/0x0040) : (val & 0x0040) / (0x0040/( __u16)( __poll_t)0x00000040)) | ( __poll_t)(0x0080 < ( __u16)( __poll_t)0x00000080 ? (val & 0x0080) * (( __u16)( __poll_t)0x00000080/0x0080) : (val & 0x0080) / (0x0080/( __u16)( __poll_t)0x00000080)) | ( __poll_t)(0x0100 < ( __u16)( __poll_t)0x00000100 ? (val & 0x0100) * (( __u16)( __poll_t)0x00000100/0x0100) : (val & 0x0100) / (0x0100/( __u16)( __poll_t)0x00000100)) | ( __poll_t)(0x0200 < ( __u16)( __poll_t)0x00000200 ? 
(val & 0x0200) * (( __u16)( __poll_t)0x00000200/0x0200) : (val & 0x0200) / (0x0200/( __u16)( __poll_t)0x00000200)) | + ( __poll_t)(0x0010 < ( __u16)( __poll_t)0x00000010 ? (val & 0x0010) * (( __u16)( __poll_t)0x00000010/0x0010) : (val & 0x0010) / (0x0010/( __u16)( __poll_t)0x00000010)) | ( __poll_t)(0x2000 < ( __u16)( __poll_t)0x00002000 ? (val & 0x2000) * (( __u16)( __poll_t)0x00002000/0x2000) : (val & 0x2000) / (0x2000/( __u16)( __poll_t)0x00002000)) | ( __poll_t)(0x0400 < ( __u16)( __poll_t)0x00000400 ? (val & 0x0400) * (( __u16)( __poll_t)0x00000400/0x0400) : (val & 0x0400) / (0x0400/( __u16)( __poll_t)0x00000400)); + +} +# 8 "./include/linux/ring_buffer.h" 2 + +struct trace_buffer; +struct ring_buffer_iter; + + + + +struct ring_buffer_event { + u32 type_len:5, time_delta:27; + + u32 array[]; +}; +# 55 "./include/linux/ring_buffer.h" +enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, + RINGBUF_TYPE_PADDING, + RINGBUF_TYPE_TIME_EXTEND, + RINGBUF_TYPE_TIME_STAMP, +}; + +unsigned ring_buffer_event_length(struct ring_buffer_event *event); +void *ring_buffer_event_data(struct ring_buffer_event *event); +u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); +# 80 "./include/linux/ring_buffer.h" +void ring_buffer_discard_commit(struct trace_buffer *buffer, + struct ring_buffer_event *event); + + + + +struct trace_buffer * +__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); +# 100 "./include/linux/ring_buffer.h" +int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full); +__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, + struct file *filp, poll_table *poll_table); + + + + +void ring_buffer_free(struct trace_buffer *buffer); + +int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu); + +void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val); + +struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer, + unsigned long length); +int ring_buffer_unlock_commit(struct trace_buffer *buffer, + struct ring_buffer_event *event); +int ring_buffer_write(struct trace_buffer *buffer, + unsigned long length, void *data); + +void ring_buffer_nest_start(struct trace_buffer *buffer); +void ring_buffer_nest_end(struct trace_buffer *buffer); + +struct ring_buffer_event * +ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, + unsigned long *lost_events); +struct ring_buffer_event * +ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, + unsigned long *lost_events); + +struct ring_buffer_iter * +ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags); +void ring_buffer_read_prepare_sync(void); +void ring_buffer_read_start(struct ring_buffer_iter *iter); +void ring_buffer_read_finish(struct ring_buffer_iter *iter); + +struct ring_buffer_event * +ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); +void ring_buffer_iter_advance(struct ring_buffer_iter *iter); +void ring_buffer_iter_reset(struct ring_buffer_iter *iter); +int ring_buffer_iter_empty(struct ring_buffer_iter *iter); +bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter); + +unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); + +void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu); +void ring_buffer_reset(struct trace_buffer *buffer); + + +int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, + struct trace_buffer *buffer_b, int cpu); +# 160 "./include/linux/ring_buffer.h" +bool ring_buffer_empty(struct 
trace_buffer *buffer); +bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu); + +void ring_buffer_record_disable(struct trace_buffer *buffer); +void ring_buffer_record_enable(struct trace_buffer *buffer); +void ring_buffer_record_off(struct trace_buffer *buffer); +void ring_buffer_record_on(struct trace_buffer *buffer); +bool ring_buffer_record_is_on(struct trace_buffer *buffer); +bool ring_buffer_record_is_set_on(struct trace_buffer *buffer); +void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu); +void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu); + +u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_entries(struct trace_buffer *buffer); +unsigned long ring_buffer_overruns(struct trace_buffer *buffer); +unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu); +unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu); + +u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu); +void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, + int cpu, u64 *ts); +void ring_buffer_set_clock(struct trace_buffer *buffer, + u64 (*clock)(void)); +void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs); +bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer); + +size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu); +size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu); + +void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu); +void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data); +int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page, + size_t len, int cpu, int full); + +struct trace_seq; + +int ring_buffer_print_entry_header(struct trace_seq *s); +int ring_buffer_print_page_header(struct trace_seq *s); + +enum ring_buffer_flags { + RB_FL_OVERWRITE = 1 << 0, +}; + + +int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node); +# 7 "./include/linux/trace_events.h" 2 +# 1 "./include/linux/trace_seq.h" 1 + + + + +# 1 "./include/linux/seq_buf.h" 1 +# 19 "./include/linux/seq_buf.h" +struct seq_buf { + char *buffer; + size_t size; + size_t len; + loff_t readpos; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void seq_buf_clear(struct seq_buf *s) +{ + s->len = 0; + s->readpos = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) +{ + s->buffer = buf; + s->size = size; + seq_buf_clear(s); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +seq_buf_has_overflowed(struct seq_buf *s) +{ + return s->len > s->size; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +seq_buf_set_overflow(struct seq_buf *s) +{ + s->len = s->size + 1; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) unsigned int +seq_buf_buffer_left(struct seq_buf *s) +{ + if (seq_buf_has_overflowed(s)) + return 0; + + return s->size - s->len; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int seq_buf_used(struct seq_buf *s) +{ + return __builtin_choose_expr(((!!(sizeof((typeof(s->len) *)1 == (typeof(s->size) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(s->len) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(s->size) * 0l)) : (int *)8))))), ((s->len) < (s->size) ? (s->len) : (s->size)), ({ typeof(s->len) __UNIQUE_ID___x965 = (s->len); typeof(s->size) __UNIQUE_ID___y966 = (s->size); ((__UNIQUE_ID___x965) < (__UNIQUE_ID___y966) ? (__UNIQUE_ID___x965) : (__UNIQUE_ID___y966)); })); +} +# 82 "./include/linux/seq_buf.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) +{ + ({ int __ret_warn_on = !!(s->len > s->size + 1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (967)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/seq_buf.h"), "i" (84), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (968)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (969)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + + if (s->len < s->size) { + *bufp = s->buffer + s->len; + return s->size - s->len; + } + + *bufp = ((void *)0); + return 0; +} +# 104 "./include/linux/seq_buf.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void seq_buf_commit(struct seq_buf *s, int num) +{ + if (num < 0) { + seq_buf_set_overflow(s); + } else { + + do { if (__builtin_expect(!!(s->len + num > s->size), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (970)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/seq_buf.h"), "i" (110), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (971)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + s->len += num; + } +} + +extern __attribute__((__format__(printf, 2, 3))) +int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); +extern __attribute__((__format__(printf, 2, 0))) +int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); +extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); +extern int 
seq_buf_to_user(struct seq_buf *s, char *ubuf, + int cnt); +extern int seq_buf_puts(struct seq_buf *s, const char *str); +extern int seq_buf_putc(struct seq_buf *s, unsigned char c); +extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); +extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, + unsigned int len); +extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); +extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); + + +extern int +seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); +# 6 "./include/linux/trace_seq.h" 2 +# 14 "./include/linux/trace_seq.h" +struct trace_seq { + unsigned char buffer[((1UL) << 12)]; + struct seq_buf seq; + int full; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +trace_seq_init(struct trace_seq *s) +{ + seq_buf_init(&s->seq, s->buffer, ((1UL) << 12)); + s->full = 0; +} +# 40 "./include/linux/trace_seq.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int trace_seq_used(struct trace_seq *s) +{ + return seq_buf_used(&s->seq); +} +# 54 "./include/linux/trace_seq.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char * +trace_seq_buffer_ptr(struct trace_seq *s) +{ + return s->buffer + seq_buf_used(&s->seq); +} +# 67 "./include/linux/trace_seq.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_seq_has_overflowed(struct trace_seq *s) +{ + return s->full || seq_buf_has_overflowed(&s->seq); +} + + + + + +extern __attribute__((__format__(printf, 2, 3))) +void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); +extern __attribute__((__format__(printf, 2, 0))) +void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); +extern void +trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); +extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); +extern int trace_seq_to_user(struct trace_seq *s, char *ubuf, + int cnt); +extern void trace_seq_puts(struct trace_seq *s, const char *str); +extern void trace_seq_putc(struct trace_seq *s, unsigned char c); +extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); +extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, + unsigned int len); +extern int trace_seq_path(struct trace_seq *s, const struct path *path); + +extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, + int nmaskbits); + +extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +# 8 "./include/linux/trace_events.h" 2 + +# 1 "./include/linux/hardirq.h" 1 + + + + +# 1 "./include/linux/context_tracking_state.h" 1 + + + + + +# 1 "./include/linux/static_key.h" 1 +# 7 "./include/linux/context_tracking_state.h" 2 + +struct context_tracking { + + + + + + + bool active; + int recursion; + enum ctx_state { + CONTEXT_DISABLED = -1, + CONTEXT_KERNEL = 0, + CONTEXT_USER, + CONTEXT_GUEST, + } state; +}; +# 49 "./include/linux/context_tracking_state.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) bool context_tracking_in_user(void) { return false; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool context_tracking_enabled(void) { return false; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool context_tracking_enabled_cpu(int cpu) { return false; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool context_tracking_enabled_this_cpu(void) { return false; } +# 6 "./include/linux/hardirq.h" 2 + + +# 1 "./include/linux/ftrace_irq.h" 1 + + + + + +extern bool trace_hwlat_callback_enabled; +extern void trace_hwlat_callback(bool enter); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_nmi_enter(void) +{ + + if (trace_hwlat_callback_enabled) + trace_hwlat_callback(true); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_nmi_exit(void) +{ + + if (trace_hwlat_callback_enabled) + trace_hwlat_callback(false); + +} +# 9 "./include/linux/hardirq.h" 2 +# 1 "./include/linux/vtime.h" 1 +# 11 "./include/linux/vtime.h" +struct task_struct; +# 54 "./include/linux/vtime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool vtime_accounting_enabled_cpu(int cpu) {return false; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool vtime_accounting_enabled_this_cpu(void) { return false; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_task_switch(struct task_struct *prev) { } +# 67 "./include/linux/vtime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_account_kernel(struct task_struct *tsk) { } +# 78 "./include/linux/vtime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_user_enter(struct task_struct *tsk) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_user_exit(struct task_struct *tsk) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_guest_enter(struct task_struct *tsk) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_guest_exit(struct task_struct *tsk) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_init_idle(struct task_struct *tsk, int cpu) { } +# 94 "./include/linux/vtime.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_account_irq_enter(struct task_struct *tsk) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_account_irq_exit(struct task_struct *tsk) { } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vtime_flush(struct task_struct *tsk) { } + + + + +extern void 
irqtime_account_irq(struct task_struct *tsk); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void account_irq_enter_time(struct task_struct *tsk) +{ + vtime_account_irq_enter(tsk); + irqtime_account_irq(tsk); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void account_irq_exit_time(struct task_struct *tsk) +{ + vtime_account_irq_exit(tsk); + irqtime_account_irq(tsk); +} +# 10 "./include/linux/hardirq.h" 2 + + +extern void synchronize_irq(unsigned int irq); +extern bool synchronize_hardirq(unsigned int irq); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __rcu_irq_enter_check_tick(void) { } + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void rcu_irq_enter_check_tick(void) +{ + if (context_tracking_enabled()) + __rcu_irq_enter_check_tick(); +} +# 54 "./include/linux/hardirq.h" +void irq_enter(void); + + + +void irq_enter_rcu(void); +# 82 "./include/linux/hardirq.h" +void irq_exit(void); + + + + +void irq_exit_rcu(void); +# 98 "./include/linux/hardirq.h" +extern void rcu_nmi_enter(void); +extern void rcu_nmi_exit(void); +# 10 "./include/linux/trace_events.h" 2 +# 1 "./include/linux/perf_event.h" 1 +# 17 "./include/linux/perf_event.h" +# 1 "./include/uapi/linux/perf_event.h" 1 +# 29 "./include/uapi/linux/perf_event.h" +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + + PERF_TYPE_MAX, +}; + + + + + + +enum perf_hw_id { + + + + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + + PERF_COUNT_HW_MAX, +}; +# 70 "./include/uapi/linux/perf_event.h" +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + + PERF_COUNT_HW_CACHE_MAX, +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + + PERF_COUNT_HW_CACHE_OP_MAX, +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + + PERF_COUNT_HW_CACHE_RESULT_MAX, +}; + + + + + + + +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + + PERF_COUNT_SW_MAX, +}; + + + + + +enum perf_event_sample_format { + PERF_SAMPLE_IP = 1U << 0, + PERF_SAMPLE_TID = 1U << 1, + PERF_SAMPLE_TIME = 1U << 2, + PERF_SAMPLE_ADDR = 1U << 3, + PERF_SAMPLE_READ = 1U << 4, + PERF_SAMPLE_CALLCHAIN = 1U << 5, + PERF_SAMPLE_ID = 1U << 6, + 
PERF_SAMPLE_CPU = 1U << 7, + PERF_SAMPLE_PERIOD = 1U << 8, + PERF_SAMPLE_STREAM_ID = 1U << 9, + PERF_SAMPLE_RAW = 1U << 10, + PERF_SAMPLE_BRANCH_STACK = 1U << 11, + PERF_SAMPLE_REGS_USER = 1U << 12, + PERF_SAMPLE_STACK_USER = 1U << 13, + PERF_SAMPLE_WEIGHT = 1U << 14, + PERF_SAMPLE_DATA_SRC = 1U << 15, + PERF_SAMPLE_IDENTIFIER = 1U << 16, + PERF_SAMPLE_TRANSACTION = 1U << 17, + PERF_SAMPLE_REGS_INTR = 1U << 18, + PERF_SAMPLE_PHYS_ADDR = 1U << 19, + PERF_SAMPLE_AUX = 1U << 20, + PERF_SAMPLE_CGROUP = 1U << 21, + + PERF_SAMPLE_MAX = 1U << 22, + + __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, +}; +# 162 "./include/uapi/linux/perf_event.h" +enum perf_branch_sample_type_shift { + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, + + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, + PERF_SAMPLE_BRANCH_COND_SHIFT = 10, + + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, + PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, + + PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, + PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, + + PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, + + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, + + PERF_SAMPLE_BRANCH_MAX_SHIFT +}; + +enum perf_branch_sample_type { + PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, + PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, + PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, + + PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, + PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, + PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, + PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, + PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, + PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, + + PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, + PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, + PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, + + PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, + PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, + + PERF_SAMPLE_BRANCH_TYPE_SAVE = + 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, + + PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, + + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, +}; + + + + +enum { + PERF_BR_UNKNOWN = 0, + PERF_BR_COND = 1, + PERF_BR_UNCOND = 2, + PERF_BR_IND = 3, + PERF_BR_CALL = 4, + PERF_BR_IND_CALL = 5, + PERF_BR_RET = 6, + PERF_BR_SYSCALL = 7, + PERF_BR_SYSRET = 8, + PERF_BR_COND_CALL = 9, + PERF_BR_COND_RET = 10, + PERF_BR_MAX, +}; +# 245 "./include/uapi/linux/perf_event.h" +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + + + + + +enum { + PERF_TXN_ELISION = (1 << 0), + PERF_TXN_TRANSACTION = (1 << 1), + PERF_TXN_SYNC = (1 << 2), + PERF_TXN_ASYNC = (1 << 3), + PERF_TXN_RETRY = (1 << 4), + PERF_TXN_CONFLICT = (1 << 5), + PERF_TXN_CAPACITY_WRITE = (1 << 6), + PERF_TXN_CAPACITY_READ = (1 << 7), + + 
PERF_TXN_MAX = (1 << 8), + + + + PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), + PERF_TXN_ABORT_SHIFT = 32, +}; +# 293 "./include/uapi/linux/perf_event.h" +enum perf_event_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, + PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, + PERF_FORMAT_ID = 1U << 2, + PERF_FORMAT_GROUP = 1U << 3, + + PERF_FORMAT_MAX = 1U << 4, +}; +# 317 "./include/uapi/linux/perf_event.h" +struct perf_event_attr { + + + + + __u32 type; + + + + + __u32 size; + + + + + __u64 config; + + union { + __u64 sample_period; + __u64 sample_freq; + }; + + __u64 sample_type; + __u64 read_format; + + __u64 disabled : 1, + inherit : 1, + pinned : 1, + exclusive : 1, + exclude_user : 1, + exclude_kernel : 1, + exclude_hv : 1, + exclude_idle : 1, + mmap : 1, + comm : 1, + freq : 1, + inherit_stat : 1, + enable_on_exec : 1, + task : 1, + watermark : 1, +# 367 "./include/uapi/linux/perf_event.h" + precise_ip : 2, + mmap_data : 1, + sample_id_all : 1, + + exclude_host : 1, + exclude_guest : 1, + + exclude_callchain_kernel : 1, + exclude_callchain_user : 1, + mmap2 : 1, + comm_exec : 1, + use_clockid : 1, + context_switch : 1, + write_backward : 1, + namespaces : 1, + ksymbol : 1, + bpf_event : 1, + aux_output : 1, + cgroup : 1, + __reserved_1 : 31; + + union { + __u32 wakeup_events; + __u32 wakeup_watermark; + }; + + __u32 bp_type; + union { + __u64 bp_addr; + __u64 kprobe_func; + __u64 uprobe_path; + __u64 config1; + }; + union { + __u64 bp_len; + __u64 kprobe_addr; + __u64 probe_offset; + __u64 config2; + }; + __u64 branch_sample_type; + + + + + + __u64 sample_regs_user; + + + + + __u32 sample_stack_user; + + __s32 clockid; +# 428 "./include/uapi/linux/perf_event.h" + __u64 sample_regs_intr; + + + + + __u32 aux_watermark; + __u16 sample_max_stack; + __u16 __reserved_2; + __u32 aux_sample_size; + __u32 __reserved_3; +}; + + + + + + +struct perf_event_query_bpf { + + + + __u32 ids_len; + + + + + __u32 prog_cnt; + + + + __u32 ids[0]; +}; +# 477 "./include/uapi/linux/perf_event.h" +enum perf_event_ioc_flags { + PERF_IOC_FLAG_GROUP = 1U << 0, +}; + + + + +struct perf_event_mmap_page { + __u32 version; + __u32 compat_version; +# 523 "./include/uapi/linux/perf_event.h" + __u32 lock; + __u32 index; + __s64 offset; + __u64 time_enabled; + __u64 time_running; + union { + __u64 capabilities; + struct { + __u64 cap_bit0 : 1, + cap_bit0_is_deprecated : 1, + + cap_user_rdpmc : 1, + cap_user_time : 1, + cap_user_time_zero : 1, + cap_____res : 59; + }; + }; +# 550 "./include/uapi/linux/perf_event.h" + __u16 pmc_width; +# 576 "./include/uapi/linux/perf_event.h" + __u16 time_shift; + __u32 time_mult; + __u64 time_offset; +# 595 "./include/uapi/linux/perf_event.h" + __u64 time_zero; + __u32 size; + + + + + + __u8 __reserved[118*8+4]; +# 620 "./include/uapi/linux/perf_event.h" + __u64 data_head; + __u64 data_tail; + __u64 data_offset; + __u64 data_size; +# 636 "./include/uapi/linux/perf_event.h" + __u64 aux_head; + __u64 aux_tail; + __u64 aux_offset; + __u64 aux_size; +}; +# 690 "./include/uapi/linux/perf_event.h" +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +struct perf_ns_link_info { + __u64 dev; + __u64 ino; +}; + +enum { + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + + NR_NAMESPACES, +}; + +enum perf_event_type { +# 754 "./include/uapi/linux/perf_event.h" + PERF_RECORD_MMAP = 1, +# 764 "./include/uapi/linux/perf_event.h" + PERF_RECORD_LOST = 2, +# 775 
"./include/uapi/linux/perf_event.h" + PERF_RECORD_COMM = 3, +# 786 "./include/uapi/linux/perf_event.h" + PERF_RECORD_EXIT = 4, +# 797 "./include/uapi/linux/perf_event.h" + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, +# 809 "./include/uapi/linux/perf_event.h" + PERF_RECORD_FORK = 7, +# 820 "./include/uapi/linux/perf_event.h" + PERF_RECORD_READ = 8, +# 883 "./include/uapi/linux/perf_event.h" + PERF_RECORD_SAMPLE = 9, +# 905 "./include/uapi/linux/perf_event.h" + PERF_RECORD_MMAP2 = 10, +# 919 "./include/uapi/linux/perf_event.h" + PERF_RECORD_AUX = 11, +# 931 "./include/uapi/linux/perf_event.h" + PERF_RECORD_ITRACE_START = 12, +# 943 "./include/uapi/linux/perf_event.h" + PERF_RECORD_LOST_SAMPLES = 13, +# 955 "./include/uapi/linux/perf_event.h" + PERF_RECORD_SWITCH = 14, +# 969 "./include/uapi/linux/perf_event.h" + PERF_RECORD_SWITCH_CPU_WIDE = 15, +# 981 "./include/uapi/linux/perf_event.h" + PERF_RECORD_NAMESPACES = 16, +# 996 "./include/uapi/linux/perf_event.h" + PERF_RECORD_KSYMBOL = 17, +# 1015 "./include/uapi/linux/perf_event.h" + PERF_RECORD_BPF_EVENT = 18, +# 1025 "./include/uapi/linux/perf_event.h" + PERF_RECORD_CGROUP = 19, + + PERF_RECORD_MAX, +}; + +enum perf_record_ksymbol_type { + PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0, + PERF_RECORD_KSYMBOL_TYPE_BPF = 1, + PERF_RECORD_KSYMBOL_TYPE_MAX +}; + + + +enum perf_bpf_event_type { + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX, +}; + + + + +enum perf_callchain_context { + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, + + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, + + PERF_CONTEXT_MAX = (__u64)-4095, +}; +# 1074 "./include/uapi/linux/perf_event.h" +union perf_mem_data_src { + __u64 val; + struct { + __u64 mem_op:5, + mem_lvl:14, + mem_snoop:5, + mem_lock:2, + mem_dtlb:7, + mem_lvl_num:4, + mem_remote:1, + mem_snoopx:2, + mem_rsvd:24; + }; +}; +# 1194 "./include/uapi/linux/perf_event.h" +struct perf_branch_entry { + __u64 from; + __u64 to; + __u64 mispred:1, + predicted:1, + in_tx:1, + abort:1, + cycles:16, + type:4, + reserved:40; +}; +# 18 "./include/linux/perf_event.h" 2 +# 1 "./include/uapi/linux/bpf_perf_event.h" 1 +# 11 "./include/uapi/linux/bpf_perf_event.h" +# 1 "./arch/x86/include/generated/uapi/asm/bpf_perf_event.h" 1 +# 1 "./include/uapi/asm-generic/bpf_perf_event.h" 1 + + + +# 1 "./include/linux/ptrace.h" 1 +# 10 "./include/linux/ptrace.h" +# 1 "./include/linux/pid_namespace.h" 1 +# 10 "./include/linux/pid_namespace.h" +# 1 "./include/linux/nsproxy.h" 1 + + + + + + + +struct mnt_namespace; +struct uts_namespace; +struct ipc_namespace; +struct pid_namespace; +struct cgroup_namespace; +struct fs_struct; +# 31 "./include/linux/nsproxy.h" +struct nsproxy { + atomic_t count; + struct uts_namespace *uts_ns; + struct ipc_namespace *ipc_ns; + struct mnt_namespace *mnt_ns; + struct pid_namespace *pid_ns_for_children; + struct net *net_ns; + struct time_namespace *time_ns; + struct time_namespace *time_ns_for_children; + struct cgroup_namespace *cgroup_ns; +}; +extern struct nsproxy init_nsproxy; +# 53 "./include/linux/nsproxy.h" +struct nsset { + unsigned flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct cred *nsset_cred(struct nsset *set) +{ + if (set->flags & 
0x10000000) + return (struct cred *)set->cred; + + return ((void *)0); +} +# 94 "./include/linux/nsproxy.h" +int copy_namespaces(unsigned long flags, struct task_struct *tsk); +void exit_task_namespaces(struct task_struct *tsk); +void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); +void free_nsproxy(struct nsproxy *ns); +int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, + struct cred *, struct fs_struct *); +int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) nsproxy_cache_init(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_nsproxy(struct nsproxy *ns) +{ + if (atomic_dec_and_test(&ns->count)) { + free_nsproxy(ns); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void get_nsproxy(struct nsproxy *ns) +{ + atomic_inc(&ns->count); +} +# 11 "./include/linux/pid_namespace.h" 2 +# 1 "./include/linux/kref.h" 1 +# 19 "./include/linux/kref.h" +struct kref { + refcount_t refcount; +}; + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kref_init(struct kref *kref) +{ + refcount_set(&kref->refcount, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int kref_read(const struct kref *kref) +{ + return refcount_read(&kref->refcount); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kref_get(struct kref *kref) +{ + refcount_inc(&kref->refcount); +} +# 62 "./include/linux/kref.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kref_put(struct kref *kref, void (*release)(struct kref *kref)) +{ + if (refcount_dec_and_test(&kref->refcount)) { + release(kref); + return 1; + } + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kref_put_mutex(struct kref *kref, + void (*release)(struct kref *kref), + struct mutex *lock) +{ + if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) { + release(kref); + return 1; + } + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kref_put_lock(struct kref *kref, + void (*release)(struct kref *kref), + spinlock_t *lock) +{ + if (refcount_dec_and_lock(&kref->refcount, lock)) { + release(kref); + return 1; + } + return 0; +} +# 109 "./include/linux/kref.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) kref_get_unless_zero(struct kref *kref) +{ + return refcount_inc_not_zero(&kref->refcount); +} +# 12 "./include/linux/pid_namespace.h" 2 +# 1 "./include/linux/ns_common.h" 1 + + + + +struct proc_ns_operations; + +struct ns_common { + atomic_long_t stashed; + const struct proc_ns_operations *ops; + unsigned int inum; +}; +# 13 "./include/linux/pid_namespace.h" 2 +# 1 "./include/linux/idr.h" 1 +# 19 "./include/linux/idr.h" +struct idr { + struct xarray idr_rt; + unsigned int idr_base; + unsigned int idr_next; +}; +# 66 "./include/linux/idr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) unsigned int idr_get_cursor(const struct idr *idr) +{ + return ({ do { extern void __compiletime_assert_972(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(idr->idr_next) == sizeof(char) || sizeof(idr->idr_next) == sizeof(short) || sizeof(idr->idr_next) == sizeof(int) || sizeof(idr->idr_next) == sizeof(long)) || sizeof(idr->idr_next) == sizeof(long long))) __compiletime_assert_972(); } while (0); ({ typeof( _Generic((idr->idr_next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (idr->idr_next))) __x = (*(const volatile typeof( _Generic((idr->idr_next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (idr->idr_next))) *)&(idr->idr_next)); do { } while (0); (typeof(idr->idr_next))__x; }); }); +} +# 79 "./include/linux/idr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void idr_set_cursor(struct idr *idr, unsigned int val) +{ + do { do { extern void __compiletime_assert_973(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(idr->idr_next) == sizeof(char) || sizeof(idr->idr_next) == sizeof(short) || sizeof(idr->idr_next) == sizeof(int) || sizeof(idr->idr_next) == sizeof(long)) || sizeof(idr->idr_next) == sizeof(long long))) __compiletime_assert_973(); } while (0); do { *(volatile typeof(idr->idr_next) *)&(idr->idr_next) = (val); } while (0); } while (0); +} +# 112 "./include/linux/idr.h" +void idr_preload(gfp_t gfp_mask); + +int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t); +int __attribute__((__warn_unused_result__)) idr_alloc_u32(struct idr *, void *ptr, u32 *id, + unsigned long max, gfp_t); +int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t); +void *idr_remove(struct idr *, unsigned long id); +void *idr_find(const struct idr *, unsigned long id); +int idr_for_each(const struct idr *, + int (*fn)(int id, void *p, void *data), void *data); +void *idr_get_next(struct idr *, int *nextid); +void *idr_get_next_ul(struct idr *, unsigned long *nextid); +void *idr_replace(struct idr *, void *, unsigned long id); +void idr_destroy(struct idr *); +# 135 "./include/linux/idr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void idr_init_base(struct idr *idr, int base) +{ + xa_init_flags(&idr->idr_rt, ((( gfp_t)4) | ( gfp_t) (1 << (((23 + 1)) + 0)))); + idr->idr_base = base; + idr->idr_next = 0; +} +# 149 "./include/linux/idr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void idr_init(struct idr *idr) +{ + idr_init_base(idr, 0); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool idr_is_empty(const struct idr *idr) +{ + return 
radix_tree_empty(&idr->idr_rt) && + radix_tree_tagged(&idr->idr_rt, 0); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void idr_preload_end(void) +{ + do { local_lock_release(({ do { const void *__vpp_verify = (typeof((&radix_tree_preloads.lock) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&radix_tree_preloads.lock)); (typeof(*(&radix_tree_preloads.lock)) *)tcp_ptr__; }); })); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); +} +# 240 "./include/linux/idr.h" +struct ida_bitmap { + unsigned long bitmap[(128 / sizeof(long))]; +}; + +struct ida { + struct xarray xa; +}; +# 255 "./include/linux/idr.h" +int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t); +void ida_free(struct ida *, unsigned int id); +void ida_destroy(struct ida *ida); +# 270 "./include/linux/idr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ida_alloc(struct ida *ida, gfp_t gfp) +{ + return ida_alloc_range(ida, 0, ~0, gfp); +} +# 287 "./include/linux/idr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) +{ + return ida_alloc_range(ida, min, ~0, gfp); +} +# 304 "./include/linux/idr.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) +{ + return ida_alloc_range(ida, 0, max, gfp); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ida_init(struct ida *ida) +{ + xa_init_flags(&ida->xa, ((( gfp_t)XA_LOCK_IRQ) | ((( gfp_t)4U) | (( gfp_t)((1U << (23 + 1)) << ( unsigned)((( xa_mark_t)0U))))))); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ida_is_empty(const struct ida *ida) +{ + return xa_empty(&ida->xa); +} +# 14 "./include/linux/pid_namespace.h" 2 + + + + +struct fs_pin; + +struct pid_namespace { + struct kref kref; + struct idr idr; + struct callback_head rcu; + unsigned int pid_allocated; + struct task_struct *child_reaper; + struct kmem_cache *pid_cachep; + unsigned int level; + struct pid_namespace *parent; + + struct fs_pin *bacct; + + struct user_namespace *user_ns; + struct ucounts *ucounts; + int reboot; + struct ns_common ns; +} __attribute__((__designated_init__)); + +extern struct pid_namespace init_pid_ns; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pid_namespace *get_pid_ns(struct pid_namespace *ns) +{ + if (ns != &init_pid_ns) + kref_get(&ns->kref); + return ns; +} + +extern struct pid_namespace *copy_pid_ns(unsigned long flags, + struct user_namespace *user_ns, struct pid_namespace *ns); +extern void zap_pid_ns_processes(struct pid_namespace *pid_ns); +extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd); +extern void put_pid_ns(struct pid_namespace *ns); +# 87 "./include/linux/pid_namespace.h" +extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); +void pidhash_init(void); +void pid_idr_init(void); +# 11 "./include/linux/ptrace.h" 2 +# 
1 "./include/uapi/linux/ptrace.h" 1 +# 59 "./include/uapi/linux/ptrace.h" +struct ptrace_peeksiginfo_args { + __u64 off; + __u32 flags; + __s32 nr; +}; + + + + + + + +struct seccomp_metadata { + __u64 filter_off; + __u64 flags; +}; + + + + + + + +struct ptrace_syscall_info { + __u8 op; + __u32 arch __attribute__((__aligned__(sizeof(__u32)))); + __u64 instruction_pointer; + __u64 stack_pointer; + union { + struct { + __u64 nr; + __u64 args[6]; + } entry; + struct { + __s64 rval; + __u8 is_error; + } exit; + struct { + __u64 nr; + __u64 args[6]; + __u32 ret_data; + } seccomp; + }; +}; +# 12 "./include/linux/ptrace.h" 2 + + + +struct syscall_info { + __u64 sp; + struct seccomp_data data; +}; + +extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, + void *buf, int len, unsigned int gup_flags); +# 56 "./include/linux/ptrace.h" +extern long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data); +extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len); +extern int ptrace_writedata(struct task_struct *tsk, char *src, unsigned long dst, int len); +extern void ptrace_disable(struct task_struct *); +extern int ptrace_request(struct task_struct *child, long request, + unsigned long addr, unsigned long data); +extern void ptrace_notify(int exit_code); +extern void __ptrace_link(struct task_struct *child, + struct task_struct *new_parent, + const struct cred *ptracer_cred); +extern void __ptrace_unlink(struct task_struct *child); +extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); +# 95 "./include/linux/ptrace.h" +extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ptrace_reparented(struct task_struct *child) +{ + return !same_thread_group(child->real_parent, child->parent); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptrace_unlink(struct task_struct *child) +{ + if (__builtin_expect(!!(child->ptrace), 0)) + __ptrace_unlink(child); +} + +int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, + unsigned long data); +int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, + unsigned long data); +# 124 "./include/linux/ptrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct task_struct *ptrace_parent(struct task_struct *task) +{ + if (__builtin_expect(!!(task->ptrace), 0)) + return ({ typeof(*(task->parent)) *________p1 = (typeof(*(task->parent)) *)({ do { extern void __compiletime_assert_974(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((task->parent)) == sizeof(char) || sizeof((task->parent)) == sizeof(short) || sizeof((task->parent)) == sizeof(int) || sizeof((task->parent)) == sizeof(long)) || sizeof((task->parent)) == sizeof(long long))) __compiletime_assert_974(); } while (0); ({ typeof( _Generic(((task->parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((task->parent)))) __x = (*(const 
volatile typeof( _Generic(((task->parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((task->parent)))) *)&((task->parent))); do { } while (0); (typeof((task->parent)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/ptrace.h", 127, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(task->parent)) *)(________p1)); }); + return ((void *)0); +} +# 140 "./include/linux/ptrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ptrace_event_enabled(struct task_struct *task, int event) +{ + return task->ptrace & (1 << (3 + (event))); +} +# 155 "./include/linux/ptrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptrace_event(int event, unsigned long message) +{ + if (__builtin_expect(!!(ptrace_event_enabled(get_current(), event)), 0)) { + get_current()->ptrace_message = message; + ptrace_notify((event << 8) | 5); + } else if (event == 4) { + + if ((get_current()->ptrace & (0x00000001|0x00010000)) == 0x00000001) + send_sig(5, get_current(), 0); + } +} +# 178 "./include/linux/ptrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptrace_event_pid(int event, struct pid *pid) +{ + + + + + + + unsigned long message = 0; + struct pid_namespace *ns; + + rcu_read_lock(); + ns = task_active_pid_ns(({ typeof(*(get_current()->parent)) *________p1 = (typeof(*(get_current()->parent)) *)({ do { extern void __compiletime_assert_975(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((get_current()->parent)) == sizeof(char) || sizeof((get_current()->parent)) == sizeof(short) || sizeof((get_current()->parent)) == sizeof(int) || sizeof((get_current()->parent)) == sizeof(long)) || sizeof((get_current()->parent)) == sizeof(long long))) __compiletime_assert_975(); } while (0); ({ typeof( _Generic(((get_current()->parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((get_current()->parent)))) __x = (*(const volatile typeof( _Generic(((get_current()->parent)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((get_current()->parent)))) *)&((get_current()->parent))); do { } while (0); (typeof((get_current()->parent)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && 
!__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/ptrace.h", 190, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(get_current()->parent)) *)(________p1)); })); + if (ns) + message = pid_nr_ns(pid, ns); + rcu_read_unlock(); + + ptrace_event(event, message); +} +# 208 "./include/linux/ptrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptrace_init_task(struct task_struct *child, bool ptrace) +{ + INIT_LIST_HEAD(&child->ptrace_entry); + INIT_LIST_HEAD(&child->ptraced); + child->jobctl = 0; + child->ptrace = 0; + child->parent = child->real_parent; + + if (__builtin_expect(!!(ptrace), 0) && get_current()->ptrace) { + child->ptrace = get_current()->ptrace; + __ptrace_link(child, get_current()->parent, get_current()->ptracer_cred); + + if (child->ptrace & 0x00010000) + task_set_jobctl_pending(child, (1UL << 19)); + else + sigaddset(&child->pending.signal, 19); + } + else + child->ptracer_cred = ((void *)0); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ptrace_release_task(struct task_struct *task) +{ + do { if (__builtin_expect(!!(!list_empty(&task->ptraced)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (976)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/ptrace.h"), "i" (237), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (977)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + ptrace_unlink(task); + do { if (__builtin_expect(!!(!list_empty(&task->ptrace_entry)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (978)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/ptrace.h"), "i" (239), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (979)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); +} +# 313 "./include/linux/ptrace.h" +extern void user_enable_single_step(struct task_struct *); +extern void user_disable_single_step(struct task_struct *); +# 343 "./include/linux/ptrace.h" +extern void user_enable_block_step(struct task_struct *); + + + +extern void user_single_step_report(struct pt_regs *regs); +# 417 "./include/linux/ptrace.h" +extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); + +extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); +# 5 "./include/uapi/asm-generic/bpf_perf_event.h" 2 + + +typedef struct pt_regs bpf_user_pt_regs_t; +# 1 
"./arch/x86/include/generated/uapi/asm/bpf_perf_event.h" 2 +# 12 "./include/uapi/linux/bpf_perf_event.h" 2 + +struct bpf_perf_event_data { + bpf_user_pt_regs_t regs; + __u64 sample_period; + __u64 addr; +}; +# 19 "./include/linux/perf_event.h" 2 + + + + + + +# 1 "./arch/x86/include/asm/perf_event.h" 1 +# 113 "./arch/x86/include/asm/perf_event.h" +union cpuid10_eax { + struct { + unsigned int version_id:8; + unsigned int num_counters:8; + unsigned int bit_width:8; + unsigned int mask_length:8; + } split; + unsigned int full; +}; + +union cpuid10_ebx { + struct { + unsigned int no_unhalted_core_cycles:1; + unsigned int no_instructions_retired:1; + unsigned int no_unhalted_reference_cycles:1; + unsigned int no_llc_reference:1; + unsigned int no_llc_misses:1; + unsigned int no_branch_instruction_retired:1; + unsigned int no_branch_misses_retired:1; + } split; + unsigned int full; +}; + +union cpuid10_edx { + struct { + unsigned int num_counters_fixed:5; + unsigned int bit_width_fixed:8; + unsigned int reserved:19; + } split; + unsigned int full; +}; + +struct x86_pmu_capability { + int version; + int num_counters_gp; + int num_counters_fixed; + int bit_width_gp; + int bit_width_fixed; + unsigned int events_mask; + int events_mask_len; +}; +# 202 "./arch/x86/include/asm/perf_event.h" +struct pebs_basic { + u64 format_size; + u64 ip; + u64 applicable_counters; + u64 tsc; +}; + +struct pebs_meminfo { + u64 address; + u64 aux; + u64 latency; + u64 tsx_tuning; +}; + +struct pebs_gprs { + u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di; + u64 r8, r9, r10, r11, r12, r13, r14, r15; +}; + +struct pebs_xmm { + u64 xmm[16*2]; +}; + +struct pebs_lbr_entry { + u64 from, to, info; +}; + +struct pebs_lbr { + struct pebs_lbr_entry lbr[0]; +}; +# 288 "./arch/x86/include/asm/perf_event.h" +extern u32 get_ibs_caps(void); + + + + + +extern void perf_events_lapic_init(void); +# 308 "./arch/x86/include/asm/perf_event.h" +struct pt_regs; +struct x86_perf_regs { + struct pt_regs regs; + u64 *xmm_regs; +}; + +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); + + +# 1 "./arch/x86/include/asm/stacktrace.h" 1 +# 13 "./arch/x86/include/asm/stacktrace.h" +# 1 "./arch/x86/include/asm/cpu_entry_area.h" 1 + + + + + + + +# 1 "./arch/x86/include/asm/intel_ds.h" 1 +# 18 "./arch/x86/include/asm/intel_ds.h" +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[8 + 4]; +} __attribute__((__aligned__(((1UL) << 12)))); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_debug_store; extern __attribute__((section(".data..percpu" "..page_aligned"))) __typeof__(struct debug_store) cpu_debug_store __attribute__((__aligned__(((1UL) << 12)))); + +struct debug_store_buffers { + char bts_buffer[(((1UL) << 12) << 4)]; + char pebs_buffer[(((1UL) << 12) << 4)]; +}; +# 9 "./arch/x86/include/asm/cpu_entry_area.h" 2 +# 1 "./arch/x86/include/asm/pgtable_areas.h" 1 +# 10 "./arch/x86/include/asm/cpu_entry_area.h" 2 +# 26 "./arch/x86/include/asm/cpu_entry_area.h" +struct exception_stacks { + char DF_stack_guard[0]; char DF_stack[(((1UL) << 12) << (0 + 1))]; char NMI_stack_guard[0]; char NMI_stack[(((1UL) << 12) << (0 + 1))]; char DB_stack_guard[0]; char DB_stack[(((1UL) << 12) << (0 + 1))]; char MCE_stack_guard[0]; char MCE_stack[(((1UL) << 12) << (0 + 1))]; 
char IST_top_guard[0]; +}; + + +struct cea_exception_stacks { + char DF_stack_guard[((1UL) << 12)]; char DF_stack[(((1UL) << 12) << (0 + 1))]; char NMI_stack_guard[((1UL) << 12)]; char NMI_stack[(((1UL) << 12) << (0 + 1))]; char DB_stack_guard[((1UL) << 12)]; char DB_stack[(((1UL) << 12) << (0 + 1))]; char MCE_stack_guard[((1UL) << 12)]; char MCE_stack[(((1UL) << 12) << (0 + 1))]; char IST_top_guard[((1UL) << 12)]; +}; + + + + +enum exception_stack_ordering { + ESTACK_DF, + ESTACK_NMI, + ESTACK_DB, + ESTACK_MCE, + N_EXCEPTION_STACKS +}; +# 78 "./arch/x86/include/asm/cpu_entry_area.h" +struct cpu_entry_area { + char gdt[((1UL) << 12)]; +# 89 "./arch/x86/include/asm/cpu_entry_area.h" + struct entry_stack_page entry_stack_page; +# 100 "./arch/x86/include/asm/cpu_entry_area.h" + struct tss_struct tss; + + + + + + struct cea_exception_stacks estacks; + + + + + + struct debug_store cpu_debug_store; + + + + + struct debug_store_buffers cpu_debug_buffers; +}; + + + + + + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_entry_area; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct cpu_entry_area *) cpu_entry_area; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cea_exception_stacks; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct cea_exception_stacks *) cea_exception_stacks; + +extern void setup_cpu_entry_areas(void); +extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); + +extern struct cpu_entry_area *get_cpu_entry_area(int cpu); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct entry_stack *cpu_entry_stack(int cpu) +{ + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; +} +# 14 "./arch/x86/include/asm/stacktrace.h" 2 +# 1 "./arch/x86/include/asm/switch_to.h" 1 + + + + +# 1 "./include/linux/sched/task_stack.h" 1 +# 10 "./include/linux/sched/task_stack.h" +# 1 "./include/uapi/linux/magic.h" 1 +# 11 "./include/linux/sched/task_stack.h" 2 +# 19 "./include/linux/sched/task_stack.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long *end_of_stack(const struct task_struct *task) +{ + return task->stack; +} +# 62 "./include/linux/sched/task_stack.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *try_get_task_stack(struct task_struct *tsk) +{ + return refcount_inc_not_zero(&tsk->stack_refcount) ? 
+ task_stack_page(tsk) : ((void *)0); +} + +extern void put_task_stack(struct task_struct *tsk); +# 81 "./include/linux/sched/task_stack.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int object_is_on_stack(const void *obj) +{ + void *stack = task_stack_page(get_current()); + + return (obj >= stack) && (obj < (stack + (((1UL) << 12) << (2 + 1)))); +} + +extern void thread_stack_cache_init(void); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { + + + + n++; + + } while (!*n); + + + + + return (unsigned long)n - (unsigned long)end_of_stack(p); + +} + +extern void set_task_stack_end_magic(struct task_struct *tsk); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kstack_end(void *addr) +{ + + + + return !(((unsigned long)addr+sizeof(void*)-1) & ((((1UL) << 12) << (2 + 1))-sizeof(void*))); +} +# 6 "./arch/x86/include/asm/switch_to.h" 2 + +struct task_struct; + +struct task_struct *__switch_to_asm(struct task_struct *prev, + struct task_struct *next); + +__attribute__((__externally_visible__)) struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); + + void ret_from_fork(void); + + + + + +struct inactive_task_frame { + + unsigned long r15; + unsigned long r14; + unsigned long r13; + unsigned long r12; + + + + + + unsigned long bx; + + + + + + unsigned long bp; + unsigned long ret_addr; +}; + +struct fork_frame { + struct inactive_task_frame frame; + struct pt_regs regs; +}; +# 65 "./arch/x86/include/asm/switch_to.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void update_task_stack(struct task_struct *task) +{ +# 80 "./arch/x86/include/asm/switch_to.h" + if (( __builtin_constant_p((__builtin_constant_p(( 8*32+16)) && ( (((( 8*32+16))>>5)==(0) && (1UL<<((( 8*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 8*32+16))>>5)==(1) && (1UL<<((( 8*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 8*32+16))>>5)==(2) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(3) && (1UL<<((( 8*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 8*32+16))>>5)==(4) && (1UL<<((( 8*32+16))&31) & (0) )) || (((( 8*32+16))>>5)==(5) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(6) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(7) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(8) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(9) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(10) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(11) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(12) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(13) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(14) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(15) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(16) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(17) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(18) && (1UL<<((( 8*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 
!= 19)); })))) ? 1 : test_bit(( 8*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p(( 8*32+16)) && ( (((( 8*32+16))>>5)==(0) && (1UL<<((( 8*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 8*32+16))>>5)==(1) && (1UL<<((( 8*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 8*32+16))>>5)==(2) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(3) && (1UL<<((( 8*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 8*32+16))>>5)==(4) && (1UL<<((( 8*32+16))&31) & (0) )) || (((( 8*32+16))>>5)==(5) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(6) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(7) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(8) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(9) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(10) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(11) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(12) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(13) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(14) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(15) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(16) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(17) && (1UL<<((( 8*32+16))&31) & 0 )) || (((( 8*32+16))>>5)==(18) && (1UL<<((( 8*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 8*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has(( 8*32+16)) )) + load_sp0(((unsigned long)(({ unsigned long __ptr = (unsigned long)task_stack_page(task); __ptr += (((1UL) << 12) << (2 + 1)) - 0; ((struct pt_regs *)__ptr) - 1; }) + 1))); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kthread_frame_init(struct inactive_task_frame *frame, + unsigned long fun, unsigned long arg) +{ + frame->bx = fun; + + + + frame->r12 = arg; + +} +# 15 "./arch/x86/include/asm/stacktrace.h" 2 + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, + STACK_TYPE_IRQ, + STACK_TYPE_SOFTIRQ, + STACK_TYPE_ENTRY, + STACK_TYPE_EXCEPTION, + STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1, +}; + +struct stack_info { + enum stack_type type; + unsigned long *begin, *end, *next_sp; +}; + +bool in_task_stack(unsigned long *stack, struct task_struct *task, + struct stack_info *info); + +bool in_entry_stack(unsigned long *stack, struct stack_info *info); + +int get_stack_info(unsigned long *stack, struct task_struct *task, + struct stack_info *info, unsigned long *visit_mask); + +const char *stack_type_name(enum stack_type type); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool on_stack(struct stack_info *info, void *addr, size_t len) +{ + void *begin = info->begin; + void *end = info->end; + + return (info->type != STACK_TYPE_UNKNOWN && + addr >= begin && addr < end && + addr + len > begin && addr + len <= end); +} +# 70 "./arch/x86/include/asm/stacktrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long * +get_frame_pointer(struct task_struct *task, struct pt_regs *regs) +{ + return ((void *)0); +} 
+ + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long * +get_stack_pointer(struct task_struct *task, struct pt_regs *regs) +{ + if (regs) + return (unsigned long *)regs->sp; + + if (task == get_current()) + return __builtin_frame_address(0); + + return (unsigned long *)task->thread.sp; +} + +void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, + unsigned long *stack, const char *log_lvl); + + +struct stack_frame { + struct stack_frame *next_frame; + unsigned long return_address; +}; + +struct stack_frame_ia32 { + u32 next_frame; + u32 return_address; +}; + +void show_opcodes(struct pt_regs *regs, const char *loglvl); +void show_ip(struct pt_regs *regs, const char *loglvl); +# 319 "./arch/x86/include/asm/perf_event.h" 2 +# 331 "./arch/x86/include/asm/perf_event.h" +struct perf_guest_switch_msr { + unsigned msr; + u64 host, guest; +}; + +extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap); +extern void perf_check_microcode(void); +extern int x86_perf_rdpmc_index(struct perf_event *event); +# 350 "./arch/x86/include/asm/perf_event.h" +extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); +# 360 "./arch/x86/include/asm/perf_event.h" + extern void intel_pt_handle_vmx(int on); +# 369 "./arch/x86/include/asm/perf_event.h" + extern void amd_pmu_enable_virt(void); + extern void amd_pmu_disable_virt(void); +# 26 "./include/linux/perf_event.h" 2 +# 1 "./arch/x86/include/asm/local64.h" 1 +# 1 "./include/asm-generic/local64.h" 1 + + + + + +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 7 "./include/asm-generic/local64.h" 2 +# 22 "./include/asm-generic/local64.h" +# 1 "./arch/x86/include/asm/local.h" 1 +# 10 "./arch/x86/include/asm/local.h" +typedef struct { + atomic_long_t a; +} local_t; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_inc(local_t *l) +{ + asm volatile(" " "incq" " " "%0" + : "+m" (l->a.counter)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_dec(local_t *l) +{ + asm volatile(" " "decq" " " "%0" + : "+m" (l->a.counter)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_add(long i, local_t *l) +{ + asm volatile(" " "addq" " " "%1,%0" + : "+m" (l->a.counter) + : "ir" (i)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void local_sub(long i, local_t *l) +{ + asm volatile(" " "subq" " " "%1,%0" + : "+m" (l->a.counter) + : "ir" (i)); +} +# 54 "./arch/x86/include/asm/local.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool local_sub_and_test(long i, local_t *l) +{ + return ({ bool c; asm volatile (" " "subq" " " " %[val], " "%[var]" "\n\t/* output condition code " "e" "*/\n" : [var] "+m" (l->a.counter), "=@cc" "e" (c) : [val] "er" (i) : "memory"); c; }); +} +# 67 "./arch/x86/include/asm/local.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool local_dec_and_test(local_t *l) +{ + return ({ bool c; asm volatile (" " "decq" " " " " "%[var]" "\n\t/* output condition code " "e" "*/\n" : [var] "+m" (l->a.counter), "=@cc" "e" (c) : : "memory"); c; }); +} +# 80 "./arch/x86/include/asm/local.h" 
[elided: verbatim machine-generated preprocessor output with collapsed newlines — the expanded #include chain of Linux kernel headers (arch/x86/include/asm/local.h, include/asm-generic/local64.h, include/linux/perf_event.h, asm/hw_breakpoint.h, linux/kdebug.h, linux/umh.h, linux/kmod.h, linux/elf.h and uapi/linux/elf.h, asm/elf.h, asm/fsgsbase.h, asm/vdso.h, linux/kernfs.h, linux/sysfs.h, linux/kobject.h, linux/moduleparam.h, linux/rbtree_latch.h, linux/error-injection.h, linux/module.h, asm-generic/sections.h, among others), apparently from a preprocessed kernel source file added by this diff; the span is unmodified upstream kernel code and contains no hand-written changes]
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool init_section_intersects(void *virt, size_t size) +{ + return memory_intersects(__init_begin, __init_end, virt, size); +} +# 169 "./include/asm-generic/sections.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_kernel_rodata(unsigned long addr) +{ + return addr >= (unsigned long)__start_rodata && + addr < (unsigned long)__end_rodata; +} +# 8 "./arch/x86/include/asm/sections.h" 2 + + +extern char __brk_base[], __brk_limit[]; +extern char __end_rodata_aligned[]; + + +extern char __end_rodata_hpage_align[]; + + +extern char __end_of_kernel_reserve[]; + +extern unsigned long _brk_start, _brk_end; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_is_kernel_initmem_freed(unsigned long addr) +{ + + + + + if (_brk_start) + return 0; + + + + + + return addr >= _brk_end && addr < (unsigned long)&_end; +} +# 16 "./include/linux/kallsyms.h" 2 + + + + + +struct module; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_kernel_inittext(unsigned long addr) +{ + if (addr >= (unsigned long)_sinittext + && addr <= (unsigned long)_einittext) + return 1; + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_kernel_text(unsigned long addr) +{ + if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || + arch_is_kernel_text(addr)) + return 1; + return in_gate_area_no_mm(addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_kernel(unsigned long addr) +{ + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) + return 1; + return in_gate_area_no_mm(addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_ksym_addr(unsigned long addr) +{ + if (1) + return is_kernel(addr); + + return is_kernel_text(addr) || is_kernel_inittext(addr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *dereference_symbol_descriptor(void *ptr) +{ +# 70 "./include/linux/kallsyms.h" + return ptr; +} + + + +unsigned long kallsyms_lookup_name(const char *name); + + +int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, + unsigned long), + void *data); + +extern int kallsyms_lookup_size_offset(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset); + + +const char *kallsyms_lookup(unsigned long addr, + unsigned long *symbolsize, + unsigned long *offset, + char **modname, char *namebuf); + + +extern int sprint_symbol(char *buffer, unsigned long address); +extern int sprint_symbol_no_offset(char *buffer, unsigned long address); +extern int sprint_backtrace(char *buffer, unsigned long address); + +int lookup_symbol_name(unsigned long addr, char *symname); +int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); + + +extern int kallsyms_show_value(void); +# 168 "./include/linux/kallsyms.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void print_ip_sym(const char *loglvl, unsigned long ip) +{ + printk("%s[<%px>] 
%pS\n", loglvl, (void *) ip, (void *) ip); +} +# 12 "./include/linux/ftrace.h" 2 +# 21 "./include/linux/ftrace.h" +# 1 "./arch/x86/include/asm/ftrace.h" 1 +# 19 "./arch/x86/include/asm/ftrace.h" +extern atomic_t modifying_ftrace_code; +extern void __fentry__(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long ftrace_call_adjust(unsigned long addr) +{ + + + + + return addr; +} +# 38 "./arch/x86/include/asm/ftrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) +{ + + regs->orig_ax = addr; +} + + + +struct dyn_arch_ftrace { + +}; +# 60 "./arch/x86/include/asm/ftrace.h" +extern void set_ftrace_ops_ro(void); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_syscall_match_sym_name(const char *sym, const char *name) +{ + + + + + return !strcmp(sym + 3, name + 3) || + (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) || + (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3)) || + (!strncmp(sym, "__do_sys", 8) && !strcmp(sym + 8, name + 3)); +} + + + + +# 1 "./include/linux/compat.h" 1 +# 15 "./include/linux/compat.h" +# 1 "./include/linux/socket.h" 1 + + + + + +# 1 "./arch/x86/include/generated/uapi/asm/socket.h" 1 +# 1 "./include/uapi/asm-generic/socket.h" 1 + + + + + +# 1 "./arch/x86/include/generated/uapi/asm/sockios.h" 1 +# 1 "./include/uapi/asm-generic/sockios.h" 1 +# 1 "./arch/x86/include/generated/uapi/asm/sockios.h" 2 +# 7 "./include/uapi/asm-generic/socket.h" 2 +# 1 "./arch/x86/include/generated/uapi/asm/socket.h" 2 +# 7 "./include/linux/socket.h" 2 +# 1 "./include/uapi/linux/sockios.h" 1 +# 23 "./include/uapi/linux/sockios.h" +# 1 "./arch/x86/include/generated/uapi/asm/sockios.h" 1 +# 24 "./include/uapi/linux/sockios.h" 2 +# 8 "./include/linux/socket.h" 2 +# 1 "./include/linux/uio.h" 1 +# 10 "./include/linux/uio.h" +# 1 "./include/crypto/hash.h" 1 +# 11 "./include/crypto/hash.h" +# 1 "./include/linux/crypto.h" 1 +# 19 "./include/linux/crypto.h" +# 1 "./include/linux/slab.h" 1 +# 136 "./include/linux/slab.h" +# 1 "./include/linux/kasan.h" 1 + + + + + + +struct kmem_cache; +struct page; +struct vm_struct; +struct task_struct; + + + + +# 1 "./arch/x86/include/asm/kasan.h" 1 +# 29 "./arch/x86/include/asm/kasan.h" +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) kasan_early_init(void); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) kasan_init(void); +# 16 "./include/linux/kasan.h" 2 + +extern unsigned char kasan_early_shadow_page[((1UL) << 12)]; +extern pte_t kasan_early_shadow_pte[512]; +extern pmd_t kasan_early_shadow_pmd[512]; +extern pud_t kasan_early_shadow_pud[512]; +extern p4d_t kasan_early_shadow_p4d[512]; + +int kasan_populate_early_shadow(const void *shadow_start, + const void *shadow_end); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kasan_mem_to_shadow(const void *addr) +{ + return (void *)((unsigned long)addr >> 3) + + (0xdffffc0000000000UL); +} + + +extern void kasan_enable_current(void); + + +extern void kasan_disable_current(void); + +void kasan_unpoison_shadow(const void *address, size_t size); + +void 
kasan_unpoison_task_stack(struct task_struct *task); +void kasan_unpoison_stack_above_sp_to(const void *watermark); + +void kasan_alloc_pages(struct page *page, unsigned int order); +void kasan_free_pages(struct page *page, unsigned int order); + +void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, + slab_flags_t *flags); + +void kasan_poison_slab(struct page *page); +void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); +void kasan_poison_object_data(struct kmem_cache *cache, void *object); +void * __attribute__((__warn_unused_result__)) kasan_init_slab_obj(struct kmem_cache *cache, + const void *object); + +void * __attribute__((__warn_unused_result__)) kasan_kmalloc_large(const void *ptr, size_t size, + gfp_t flags); +void kasan_kfree_large(void *ptr, unsigned long ip); +void kasan_poison_kfree(void *ptr, unsigned long ip); +void * __attribute__((__warn_unused_result__)) kasan_kmalloc(struct kmem_cache *s, const void *object, + size_t size, gfp_t flags); +void * __attribute__((__warn_unused_result__)) kasan_krealloc(const void *object, size_t new_size, + gfp_t flags); + +void * __attribute__((__warn_unused_result__)) kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags); +bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); + +struct kasan_cache { + int alloc_meta_offset; + int free_meta_offset; +}; +# 82 "./include/linux/kasan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kasan_module_alloc(void *addr, size_t size) { return 0; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kasan_free_shadow(const struct vm_struct *vm) {} + + +int kasan_add_zero_shadow(void *start, unsigned long size); +void kasan_remove_zero_shadow(void *start, unsigned long size); + +size_t __ksize(const void *); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kasan_unpoison_slab(const void *ptr) +{ + kasan_unpoison_shadow(ptr, __ksize(ptr)); +} +size_t kasan_metadata_size(struct kmem_cache *cache); + +bool kasan_save_enable_multi_shot(void); +void kasan_restore_multi_shot(bool enabled); +# 175 "./include/linux/kasan.h" +void kasan_cache_shrink(struct kmem_cache *cache); +void kasan_cache_shutdown(struct kmem_cache *cache); +# 198 "./include/linux/kasan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kasan_init_tags(void) { } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kasan_reset_tag(const void *addr) +{ + return (void *)addr; +} + + + + +int kasan_populate_vmalloc(unsigned long addr, unsigned long size); +void kasan_poison_vmalloc(const void *start, unsigned long size); +void kasan_unpoison_vmalloc(const void *start, unsigned long size); +void kasan_release_vmalloc(unsigned long start, unsigned long end, + unsigned long free_region_start, + unsigned long free_region_end); +# 234 "./include/linux/kasan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kasan_non_canonical_hook(unsigned long addr) { } +# 137 "./include/linux/slab.h" 2 + +struct mem_cgroup; + + + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) kmem_cache_init(void); 
+bool slab_is_available(void); + +extern bool usercopy_fallback; + +struct kmem_cache *kmem_cache_create(const char *name, unsigned int size, + unsigned int align, slab_flags_t flags, + void (*ctor)(void *)); +struct kmem_cache *kmem_cache_create_usercopy(const char *name, + unsigned int size, unsigned int align, + slab_flags_t flags, + unsigned int useroffset, unsigned int usersize, + void (*ctor)(void *)); +void kmem_cache_destroy(struct kmem_cache *); +int kmem_cache_shrink(struct kmem_cache *); + +void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); +void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); +# 187 "./include/linux/slab.h" +void * __attribute__((__warn_unused_result__)) krealloc(const void *, size_t, gfp_t); +void kfree(const void *); +void kzfree(const void *); +size_t __ksize(const void *); +size_t ksize(const void *); + + +void __check_heap_object(const void *ptr, unsigned long n, struct page *page, + bool to_user); +# 308 "./include/linux/slab.h" +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_RECLAIM, + + KMALLOC_DMA, + + NR_KMALLOC_TYPES +}; + + +extern struct kmem_cache * +kmalloc_caches[NR_KMALLOC_TYPES][(12 + 1) + 1]; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) enum kmalloc_cache_type kmalloc_type(gfp_t flags) +{ + + + + + + if (__builtin_expect(!!((flags & ((( gfp_t)0x01u) | (( gfp_t)0x10u))) == 0), 1)) + return KMALLOC_NORMAL; + + + + + + return flags & (( gfp_t)0x01u) ? KMALLOC_DMA : KMALLOC_RECLAIM; + + + +} +# 349 "./include/linux/slab.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned int kmalloc_index(size_t size) +{ + if (!size) + return 0; + + if (size <= (1 << 3)) + return 3; + + if ((1 << 3) <= 32 && size > 64 && size <= 96) + return 1; + if ((1 << 3) <= 64 && size > 128 && size <= 192) + return 2; + if (size <= 8) return 3; + if (size <= 16) return 4; + if (size <= 32) return 5; + if (size <= 64) return 6; + if (size <= 128) return 7; + if (size <= 256) return 8; + if (size <= 512) return 9; + if (size <= 1024) return 10; + if (size <= 2 * 1024) return 11; + if (size <= 4 * 1024) return 12; + if (size <= 8 * 1024) return 13; + if (size <= 16 * 1024) return 14; + if (size <= 32 * 1024) return 15; + if (size <= 64 * 1024) return 16; + if (size <= 128 * 1024) return 17; + if (size <= 256 * 1024) return 18; + if (size <= 512 * 1024) return 19; + if (size <= 1024 * 1024) return 20; + if (size <= 2 * 1024 * 1024) return 21; + if (size <= 4 * 1024 * 1024) return 22; + if (size <= 8 * 1024 * 1024) return 23; + if (size <= 16 * 1024 * 1024) return 24; + if (size <= 32 * 1024 * 1024) return 25; + if (size <= 64 * 1024 * 1024) return 26; + do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (995)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/slab.h"), "i" (385), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (996)); }); asm 
volatile(""); __builtin_unreachable(); } while (0); } while (0); + + + return -1; +} + + +void *__kmalloc(size_t size, gfp_t flags) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__malloc__)); +void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__malloc__)); +void kmem_cache_free(struct kmem_cache *, void *); +# 403 "./include/linux/slab.h" +void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); +int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void kfree_bulk(size_t size, void **p) +{ + kmem_cache_free_bulk(((void *)0), size, p); +} + + +void *__kmalloc_node(size_t size, gfp_t flags, int node) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__malloc__)); +void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__malloc__)); +# 431 "./include/linux/slab.h" +extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__malloc__)); + + +extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, + gfp_t gfpflags, + int node, size_t size) __attribute__((__assume_aligned__(__alignof__(unsigned long long)))) __attribute__((__malloc__)); +# 469 "./include/linux/slab.h" +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __attribute__((__assume_aligned__(((1UL) << 12)))) __attribute__((__malloc__)); + + +extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __attribute__((__assume_aligned__(((1UL) << 12)))) __attribute__((__malloc__)); +# 481 "./include/linux/slab.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void *kmalloc_large(size_t size, gfp_t flags) +{ + unsigned int order = get_order(size); + return kmalloc_order_trace(size, flags, order); +} +# 541 "./include/linux/slab.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void *kmalloc(size_t size, gfp_t flags) +{ + if (__builtin_constant_p(size)) { + + unsigned int index; + + if (size > (1UL << (12 + 1))) + return kmalloc_large(size, flags); + + index = kmalloc_index(size); + + if (!index) + return ((void *)16); + + return kmem_cache_alloc_trace( + kmalloc_caches[kmalloc_type(flags)][index], + flags, size); + + } + return __kmalloc(size, flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void *kmalloc_node(size_t size, gfp_t flags, int node) +{ + + if (__builtin_constant_p(size) && + size <= (1UL << (12 + 1))) { + unsigned int i = kmalloc_index(size); + + if (!i) + return ((void *)16); + + return kmem_cache_alloc_node_trace( + kmalloc_caches[kmalloc_type(flags)][i], + flags, node, size); + } + + return __kmalloc_node(size, flags, node); +} + +int memcg_update_all_caches(int num_memcgs); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
*kmalloc_array(size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (__builtin_expect(!!(({ typeof(n) __a = (n); typeof(size) __b = (size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); })), 0)) + return ((void *)0); + if (__builtin_constant_p(n) && __builtin_constant_p(size)) + return kmalloc(bytes, flags); + return __kmalloc(bytes, flags); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kcalloc(size_t n, size_t size, gfp_t flags) +{ + return kmalloc_array(n, size, flags | (( gfp_t)0x100u)); +} +# 619 "./include/linux/slab.h" +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, + int node) +{ + size_t bytes; + + if (__builtin_expect(!!(({ typeof(n) __a = (n); typeof(size) __b = (size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); })), 0)) + return ((void *)0); + if (__builtin_constant_p(n) && __builtin_constant_p(size)) + return kmalloc_node(bytes, flags, node); + return __kmalloc_node(bytes, flags, node); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) +{ + return kmalloc_array_node(n, size, flags | (( gfp_t)0x100u), node); +} + + + +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); +# 657 "./include/linux/slab.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) +{ + return kmem_cache_alloc(k, flags | (( gfp_t)0x100u)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kzalloc(size_t size, gfp_t flags) +{ + return kmalloc(size, flags | (( gfp_t)0x100u)); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kzalloc_node(size_t size, gfp_t flags, int node) +{ + return kmalloc_node(size, flags | (( gfp_t)0x100u), node); +} + +unsigned int kmem_cache_size(struct kmem_cache *s); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) kmem_cache_init_late(void); +# 20 "./include/linux/crypto.h" 2 +# 131 "./include/linux/crypto.h" +struct scatterlist; +struct crypto_async_request; +struct crypto_tfm; +struct crypto_type; + +typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); +# 145 "./include/linux/crypto.h" +struct crypto_async_request { + struct list_head list; + crypto_completion_t complete; + void *data; + struct crypto_tfm *tfm; + + u32 flags; +}; +# 208 "./include/linux/crypto.h" +struct cipher_alg { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +}; +# 228 "./include/linux/crypto.h" +struct compress_alg { + int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int 
slen, u8 *dst, unsigned int *dlen); + int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); +}; +# 244 "./include/linux/crypto.h" +struct crypto_istat_aead { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; +# 262 "./include/linux/crypto.h" +struct crypto_istat_akcipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t verify_cnt; + atomic64_t sign_cnt; + atomic64_t err_cnt; +}; +# 280 "./include/linux/crypto.h" +struct crypto_istat_cipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; +# 296 "./include/linux/crypto.h" +struct crypto_istat_compress { + atomic64_t compress_cnt; + atomic64_t compress_tlen; + atomic64_t decompress_cnt; + atomic64_t decompress_tlen; + atomic64_t err_cnt; +}; + + + + + + + +struct crypto_istat_hash { + atomic64_t hash_cnt; + atomic64_t hash_tlen; + atomic64_t err_cnt; +}; +# 323 "./include/linux/crypto.h" +struct crypto_istat_kpp { + atomic64_t setsecret_cnt; + atomic64_t generate_public_key_cnt; + atomic64_t compute_shared_secret_cnt; + atomic64_t err_cnt; +}; +# 337 "./include/linux/crypto.h" +struct crypto_istat_rng { + atomic64_t generate_cnt; + atomic64_t generate_tlen; + atomic64_t seed_cnt; + atomic64_t err_cnt; +}; +# 435 "./include/linux/crypto.h" +struct crypto_alg { + struct list_head cra_list; + struct list_head cra_users; + + u32 cra_flags; + unsigned int cra_blocksize; + unsigned int cra_ctxsize; + unsigned int cra_alignmask; + + int cra_priority; + refcount_t cra_refcnt; + + char cra_name[128]; + char cra_driver_name[128]; + + const struct crypto_type *cra_type; + + union { + struct cipher_alg cipher; + struct compress_alg compress; + } cra_u; + + int (*cra_init)(struct crypto_tfm *tfm); + void (*cra_exit)(struct crypto_tfm *tfm); + void (*cra_destroy)(struct crypto_alg *alg); + + struct module *cra_module; + + + union { + struct crypto_istat_aead aead; + struct crypto_istat_akcipher akcipher; + struct crypto_istat_cipher cipher; + struct crypto_istat_compress compress; + struct crypto_istat_hash hash; + struct crypto_istat_rng rng; + struct crypto_istat_kpp kpp; + } stats; + + +} __attribute__ ((__aligned__(__alignof__(unsigned long long)))); + + +void crypto_stats_init(struct crypto_alg *alg); +void crypto_stats_get(struct crypto_alg *alg); +void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); +void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); +void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg); +void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg); +void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg); +void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret); +void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret); +void 
crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret); +void crypto_stats_rng_seed(struct crypto_alg *alg, int ret); +void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret); +void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); +void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); +# 540 "./include/linux/crypto.h" +struct crypto_wait { + struct completion completion; + int err; +}; +# 555 "./include/linux/crypto.h" +void crypto_req_done(struct crypto_async_request *req, int err); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_wait_req(int err, struct crypto_wait *wait) +{ + switch (err) { + case -115: + case -16: + wait_for_completion(&wait->completion); + reinit_completion(&wait->completion); + err = wait->err; + break; + } + + return err; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_init_wait(struct crypto_wait *wait) +{ + __init_completion(&wait->completion); +} + + + + +int crypto_register_alg(struct crypto_alg *alg); +void crypto_unregister_alg(struct crypto_alg *alg); +int crypto_register_algs(struct crypto_alg *algs, int count); +void crypto_unregister_algs(struct crypto_alg *algs, int count); + + + + +int crypto_has_alg(const char *name, u32 type, u32 mask); + + + + + + + +struct crypto_tfm { + + u32 crt_flags; + + void (*exit)(struct crypto_tfm *tfm); + + struct crypto_alg *__crt_alg; + + void *__crt_ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long)))); +}; + +struct crypto_cipher { + struct crypto_tfm base; +}; + +struct crypto_comp { + struct crypto_tfm base; +}; + +enum { + CRYPTOA_UNSPEC, + CRYPTOA_ALG, + CRYPTOA_TYPE, + CRYPTOA_U32, + __CRYPTOA_MAX, +}; + + + + + + +struct crypto_attr_alg { + char name[128]; +}; + +struct crypto_attr_type { + u32 type; + u32 mask; +}; + +struct crypto_attr_u32 { + u32 num; +}; + + + + + +struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); +void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_free_tfm(struct crypto_tfm *tfm) +{ + return crypto_destroy_tfm(tfm, tfm); +} + +int alg_test(const char *driver, const char *alg, u32 type, u32 mask); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *crypto_tfm_alg_name(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_name; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_driver_name; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_tfm_alg_priority(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_priority; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & 0x0000000f; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm 
*tfm) +{ + return tfm->__crt_alg->cra_blocksize; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_alignmask; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 crypto_tfm_get_flags(struct crypto_tfm *tfm) +{ + return tfm->crt_flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags) +{ + tfm->crt_flags |= flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags) +{ + tfm->crt_flags &= ~flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *crypto_tfm_ctx(struct crypto_tfm *tfm) +{ + return tfm->__crt_ctx; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_tfm_ctx_alignment(void) +{ + struct crypto_tfm *tfm; + return __alignof__(tfm->__crt_ctx); +} +# 730 "./include/linux/crypto.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_cipher *)tfm; +} +# 749 "./include/linux/crypto.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~0x0000000f; + type |= 0x00000001; + mask |= 0x0000000f; + + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) +{ + return &tfm->base; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_free_cipher(struct crypto_cipher *tfm) +{ + crypto_free_tfm(crypto_cipher_tfm(tfm)); +} +# 783 "./include/linux/crypto.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~0x0000000f; + type |= 0x00000001; + mask |= 0x0000000f; + + return crypto_has_alg(alg_name, type, mask); +} +# 802 "./include/linux/crypto.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) +{ + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_cipher_set_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_cipher_clear_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); +} +# 845 "./include/linux/crypto.h" +int crypto_cipher_setkey(struct crypto_cipher *tfm, + const u8 *key, unsigned int keylen); +# 857 "./include/linux/crypto.h" +void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); +# 869 "./include/linux/crypto.h" +void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_comp *)tfm; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_comp *crypto_alloc_comp(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~0x0000000f; + type |= 0x00000002; + mask |= 0x0000000f; + + return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) +{ + return &tfm->base; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_free_comp(struct crypto_comp *tfm) +{ + crypto_free_tfm(crypto_comp_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_has_comp(const char *alg_name, u32 type, u32 mask) +{ + type &= ~0x0000000f; + type |= 0x00000002; + mask |= 0x0000000f; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *crypto_comp_name(struct crypto_comp *tfm) +{ + return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); +} + +int crypto_comp_compress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); + +int crypto_comp_decompress(struct crypto_comp *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen); +# 12 "./include/crypto/hash.h" 2 + + +struct crypto_ahash; +# 42 "./include/crypto/hash.h" +struct hash_alg_common { + unsigned int digestsize; + unsigned int statesize; + + struct crypto_alg base; +}; + +struct ahash_request { + struct crypto_async_request base; + + unsigned int nbytes; + struct scatterlist *src; + u8 *result; + + + void *priv; + + void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long)))); +}; +# 128 "./include/crypto/hash.h" +struct ahash_alg { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + struct hash_alg_common halg; +}; + +struct shash_desc { + struct crypto_shash 
*tfm; + void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long)))); +}; +# 190 "./include/crypto/hash.h" +struct shash_alg { + int (*init)(struct shash_desc *desc); + int (*update)(struct shash_desc *desc, const u8 *data, + unsigned int len); + int (*final)(struct shash_desc *desc, u8 *out); + int (*finup)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*digest)(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + int (*export)(struct shash_desc *desc, void *out); + int (*import)(struct shash_desc *desc, const void *in); + int (*setkey)(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + int (*init_tfm)(struct crypto_shash *tfm); + void (*exit_tfm)(struct crypto_shash *tfm); + + unsigned int descsize; + + + unsigned int digestsize + __attribute__ ((aligned(__alignof__(struct hash_alg_common)))); + unsigned int statesize; + + struct crypto_alg base; +}; + +struct crypto_ahash { + int (*init)(struct ahash_request *req); + int (*update)(struct ahash_request *req); + int (*final)(struct ahash_request *req); + int (*finup)(struct ahash_request *req); + int (*digest)(struct ahash_request *req); + int (*export)(struct ahash_request *req, void *out); + int (*import)(struct ahash_request *req, const void *in); + int (*setkey)(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int reqsize; + struct crypto_tfm base; +}; + +struct crypto_shash { + unsigned int descsize; + struct crypto_tfm base; +}; +# 246 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) +{ + return ({ void *__mptr = (void *)(tfm); do { extern void __compiletime_assert_997(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(tfm)), typeof(((struct crypto_ahash *)0)->base)) && !__builtin_types_compatible_p(typeof(*(tfm)), typeof(void))))) __compiletime_assert_997(); } while (0); ((struct crypto_ahash *)(__mptr - __builtin_offsetof(struct crypto_ahash, base))); }); +} +# 265 "./include/crypto/hash.h" +struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, + u32 mask); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) +{ + return &tfm->base; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_free_ahash(struct crypto_ahash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); +} +# 292 "./include/crypto/hash.h" +int crypto_has_ahash(const char *alg_name, u32 type, u32 mask); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *crypto_ahash_alg_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *crypto_ahash_driver_name(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_ahash_alignmask( + struct crypto_ahash *tfm) +{ + return 
crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); +} +# 319 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hash_alg_common *__crypto_hash_alg_common( + struct crypto_alg *alg) +{ + return ({ void *__mptr = (void *)(alg); do { extern void __compiletime_assert_998(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(alg)), typeof(((struct hash_alg_common *)0)->base)) && !__builtin_types_compatible_p(typeof(*(alg)), typeof(void))))) __compiletime_assert_998(); } while (0); ((struct hash_alg_common *)(__mptr - __builtin_offsetof(struct hash_alg_common, base))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hash_alg_common *crypto_hash_alg_common( + struct crypto_ahash *tfm) +{ + return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); +} +# 346 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->digestsize; +} +# 361 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->statesize; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) +{ + return crypto_tfm_get_flags(crypto_ahash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); +} +# 391 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_ahash *crypto_ahash_reqtfm( + struct ahash_request *req) +{ + return __crypto_ahash_cast(req->base.tfm); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) +{ + return tfm->reqsize; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *ahash_request_ctx(struct ahash_request *req) +{ + return req->__ctx; +} +# 424 "./include/crypto/hash.h" +int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen); +# 438 "./include/crypto/hash.h" +int crypto_ahash_finup(struct ahash_request *req); +# 455 "./include/crypto/hash.h" +int crypto_ahash_final(struct ahash_request *req); +# 468 "./include/crypto/hash.h" +int crypto_ahash_digest(struct 
ahash_request *req); +# 481 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_ahash_export(struct ahash_request *req, void *out) +{ + return crypto_ahash_reqtfm(req)->export(req, out); +} +# 497 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_ahash_import(struct ahash_request *req, const void *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & 0x00000001) + return -126; + + return tfm->import(req, in); +} +# 518 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_ahash_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & 0x00000001) + return -126; + + return tfm->init(req); +} +# 539 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_ahash_update(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; + int ret; + + crypto_stats_get(alg); + ret = crypto_ahash_reqtfm(req)->update(req); + crypto_stats_ahash_update(nbytes, ret, alg); + return ret; +} +# 571 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ahash_request_set_tfm(struct ahash_request *req, + struct crypto_ahash *tfm) +{ + req->base.tfm = crypto_ahash_tfm(tfm); +} +# 589 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct ahash_request *ahash_request_alloc( + struct crypto_ahash *tfm, gfp_t gfp) +{ + struct ahash_request *req; + + req = kmalloc(sizeof(struct ahash_request) + + crypto_ahash_reqsize(tfm), gfp); + + if (__builtin_expect(!!(req), 1)) + ahash_request_set_tfm(req, tfm); + + return req; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ahash_request_free(struct ahash_request *req) +{ + kzfree(req); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ahash_request_zero(struct ahash_request *req) +{ + memzero_explicit(req, sizeof(*req) + + crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct ahash_request *ahash_request_cast( + struct crypto_async_request *req) +{ + return ({ void *__mptr = (void *)(req); do { extern void __compiletime_assert_999(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(req)), typeof(((struct ahash_request *)0)->base)) && !__builtin_types_compatible_p(typeof(*(req)), typeof(void))))) __compiletime_assert_999(); } while (0); ((struct ahash_request *)(__mptr - __builtin_offsetof(struct ahash_request, base))); }); +} +# 649 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ahash_request_set_callback(struct ahash_request *req, + u32 
flags, + crypto_completion_t compl, + void *data) +{ + req->base.complete = compl; + req->base.data = data; + req->base.flags = flags; +} +# 672 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ahash_request_set_crypt(struct ahash_request *req, + struct scatterlist *src, u8 *result, + unsigned int nbytes) +{ + req->src = src; + req->nbytes = nbytes; + req->result = result; +} +# 708 "./include/crypto/hash.h" +struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, + u32 mask); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) +{ + return &tfm->base; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_free_shash(struct crypto_shash *tfm) +{ + crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *crypto_shash_alg_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_name(crypto_shash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *crypto_shash_driver_name(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_shash_alignmask( + struct crypto_shash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); +} +# 750 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg) +{ + return ({ void *__mptr = (void *)(alg); do { extern void __compiletime_assert_1000(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(alg)), typeof(((struct shash_alg *)0)->base)) && !__builtin_types_compatible_p(typeof(*(alg)), typeof(void))))) __compiletime_assert_1000(); } while (0); ((struct shash_alg *)(__mptr - __builtin_offsetof(struct shash_alg, base))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm) +{ + return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); +} +# 774 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->digestsize; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_shash_statesize(struct crypto_shash *tfm) +{ + return crypto_shash_alg(tfm)->statesize; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 crypto_shash_get_flags(struct 
crypto_shash *tfm) +{ + return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); +} +# 814 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int crypto_shash_descsize(struct crypto_shash *tfm) +{ + return tfm->descsize; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *shash_desc_ctx(struct shash_desc *desc) +{ + return desc->__ctx; +} +# 837 "./include/crypto/hash.h" +int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); +# 855 "./include/crypto/hash.h" +int crypto_shash_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); +# 874 "./include/crypto/hash.h" +int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, + unsigned int len, u8 *out); +# 889 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_shash_export(struct shash_desc *desc, void *out) +{ + return crypto_shash_alg(desc->tfm)->export(desc, out); +} +# 906 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_shash_import(struct shash_desc *desc, const void *in) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & 0x00000001) + return -126; + + return crypto_shash_alg(tfm)->import(desc, in); +} +# 928 "./include/crypto/hash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int crypto_shash_init(struct shash_desc *desc) +{ + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & 0x00000001) + return -126; + + return crypto_shash_alg(tfm)->init(desc); +} +# 950 "./include/crypto/hash.h" +int crypto_shash_update(struct shash_desc *desc, const u8 *data, + unsigned int len); +# 967 "./include/crypto/hash.h" +int crypto_shash_final(struct shash_desc *desc, u8 *out); +# 984 "./include/crypto/hash.h" +int crypto_shash_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void shash_desc_zero(struct shash_desc *desc) +{ + memzero_explicit(desc, + sizeof(*desc) + crypto_shash_descsize(desc->tfm)); +} +# 11 "./include/linux/uio.h" 2 +# 1 "./include/uapi/linux/uio.h" 1 +# 17 "./include/uapi/linux/uio.h" +struct iovec +{ + void *iov_base; + __kernel_size_t iov_len; +}; +# 12 "./include/linux/uio.h" 2 + +struct page; +struct pipe_inode_info; + +struct kvec { + void *iov_base; + size_t iov_len; +}; + +enum iter_type { + + ITER_IOVEC = 4, + ITER_KVEC = 8, + ITER_BVEC = 16, + ITER_PIPE = 32, + ITER_DISCARD = 64, +}; + +struct iov_iter { + + + + + + unsigned int type; + size_t iov_offset; + size_t count; + union { + const struct iovec *iov; + const struct kvec *kvec; + const struct 
bio_vec *bvec; + struct pipe_inode_info *pipe; + }; + union { + unsigned long nr_segs; + struct { + unsigned int head; + unsigned int start_head; + }; + }; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum iter_type iov_iter_type(const struct iov_iter *i) +{ + return i->type & ~(0 | 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool iter_is_iovec(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_IOVEC; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool iov_iter_is_kvec(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_KVEC; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool iov_iter_is_bvec(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_BVEC; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool iov_iter_is_pipe(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_PIPE; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool iov_iter_is_discard(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_DISCARD; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char iov_iter_rw(const struct iov_iter *i) +{ + return i->type & (0 | 1); +} +# 96 "./include/linux/uio.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs) +{ + unsigned long seg; + size_t ret = 0; + + for (seg = 0; seg < nr_segs; seg++) + ret += iov[seg].iov_len; + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct iovec iov_iter_iovec(const struct iov_iter *iter) +{ + return (struct iovec) { + .iov_base = iter->iov->iov_base + iter->iov_offset, + .iov_len = __builtin_choose_expr(((!!(sizeof((typeof(iter->count) *)1 == (typeof(iter->iov->iov_len - iter->iov_offset) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(iter->count) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(iter->iov->iov_len - iter->iov_offset) * 0l)) : (int *)8))))), ((iter->count) < (iter->iov->iov_len - iter->iov_offset) ? (iter->count) : (iter->iov->iov_len - iter->iov_offset)), ({ typeof(iter->count) __UNIQUE_ID___x1001 = (iter->count); typeof(iter->iov->iov_len - iter->iov_offset) __UNIQUE_ID___y1002 = (iter->iov->iov_len - iter->iov_offset); ((__UNIQUE_ID___x1001) < (__UNIQUE_ID___y1002) ? 
(__UNIQUE_ID___x1001) : (__UNIQUE_ID___y1002)); })) + , + }; +} + +size_t iov_iter_copy_from_user_atomic(struct page *page, + struct iov_iter *i, unsigned long offset, size_t bytes); +void iov_iter_advance(struct iov_iter *i, size_t bytes); +void iov_iter_revert(struct iov_iter *i, size_t bytes); +int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); +size_t iov_iter_single_seg_count(const struct iov_iter *i); +size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, + struct iov_iter *i); +size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, + struct iov_iter *i); + +size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); +size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i); +size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); +bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) +size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) +{ + if (__builtin_expect(!!(!check_copy_size(addr, bytes, true)), 0)) + return 0; + else + return _copy_to_iter(addr, bytes, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) +size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) +{ + if (__builtin_expect(!!(!check_copy_size(addr, bytes, false)), 0)) + return 0; + else + return _copy_from_iter(addr, bytes, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) +bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) +{ + if (__builtin_expect(!!(!check_copy_size(addr, bytes, false)), 0)) + return false; + else + return _copy_from_iter_full(addr, bytes, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) +size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) +{ + if (__builtin_expect(!!(!check_copy_size(addr, bytes, false)), 0)) + return 0; + else + return _copy_from_iter_nocache(addr, bytes, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) +bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) +{ + if (__builtin_expect(!!(!check_copy_size(addr, bytes, false)), 0)) + return false; + else + return _copy_from_iter_full_nocache(addr, bytes, i); +} +# 184 "./include/linux/uio.h" +size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i); + + + + + +size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) +size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) +{ + if 
(__builtin_expect(!!(!check_copy_size(addr, bytes, false)), 0)) + return 0; + else + return _copy_from_iter_flushcache(addr, bytes, i); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) __attribute__((__warn_unused_result__)) +size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) +{ + if (__builtin_expect(!!(!check_copy_size(addr, bytes, true)), 0)) + return 0; + else + return _copy_to_iter_mcsafe(addr, bytes, i); +} + +size_t iov_iter_zero(size_t bytes, struct iov_iter *); +unsigned long iov_iter_alignment(const struct iov_iter *i); +unsigned long iov_iter_gap_alignment(const struct iov_iter *i); +void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, + unsigned long nr_segs, size_t count); +void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, + unsigned long nr_segs, size_t count); +void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, + unsigned long nr_segs, size_t count); +void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe, + size_t count); +void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count); +ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, + size_t maxsize, unsigned maxpages, size_t *start); +ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, + size_t maxsize, size_t *start); +int iov_iter_npages(const struct iov_iter *i, int maxpages); + +const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) size_t iov_iter_count(const struct iov_iter *i) +{ + return i->count; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void iov_iter_truncate(struct iov_iter *i, u64 count) +{ + + + + + + + if (i->count > count) + i->count = count; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void iov_iter_reexpand(struct iov_iter *i, size_t count) +{ + i->count = count; +} +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i); +size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, + struct iov_iter *i); + +ssize_t import_iovec(int type, const struct iovec * uvector, + unsigned nr_segs, unsigned fast_segs, + struct iovec **iov, struct iov_iter *i); + + +struct compat_iovec; +ssize_t compat_import_iovec(int type, const struct compat_iovec * uvector, + unsigned nr_segs, unsigned fast_segs, + struct iovec **iov, struct iov_iter *i); + + +int import_single_range(int type, void *buf, size_t len, + struct iovec *iov, struct iov_iter *i); + +int iov_iter_for_each_range(struct iov_iter *i, size_t bytes, + int (*f)(struct kvec *vec, void *context), + void *context); +# 9 "./include/linux/socket.h" 2 + + +# 1 "./include/uapi/linux/socket.h" 1 +# 10 "./include/uapi/linux/socket.h" +typedef unsigned short __kernel_sa_family_t; + + + + + +struct __kernel_sockaddr_storage { + union { + struct { + __kernel_sa_family_t ss_family; + + char 
__data[128 - sizeof(unsigned short)]; + + + }; + void *__align; + }; +}; +# 12 "./include/linux/socket.h" 2 + +struct pid; +struct cred; +struct socket; + + + + + +struct seq_file; +extern void socket_seq_show(struct seq_file *seq); + + +typedef __kernel_sa_family_t sa_family_t; + + + + + +struct sockaddr { + sa_family_t sa_family; + char sa_data[14]; +}; + +struct linger { + int l_onoff; + int l_linger; +}; +# 49 "./include/linux/socket.h" +struct msghdr { + void *msg_name; + int msg_namelen; + struct iov_iter msg_iter; + + + + + + + union { + void *msg_control; + void *msg_control_user; + }; + bool msg_control_is_user : 1; + __kernel_size_t msg_controllen; + unsigned int msg_flags; + struct kiocb *msg_iocb; +}; + +struct user_msghdr { + void *msg_name; + int msg_namelen; + struct iovec *msg_iov; + __kernel_size_t msg_iovlen; + void *msg_control; + __kernel_size_t msg_controllen; + unsigned int msg_flags; +}; + + +struct mmsghdr { + struct user_msghdr msg_hdr; + unsigned int msg_len; +}; + + + + + + + +struct cmsghdr { + __kernel_size_t cmsg_len; + int cmsg_level; + int cmsg_type; +}; +# 140 "./include/linux/socket.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, + struct cmsghdr *__cmsg) +{ + struct cmsghdr * __ptr; + + __ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) + ( ((__cmsg->cmsg_len)+sizeof(long)-1) & ~(sizeof(long)-1) )); + if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size) + return (struct cmsghdr *)0; + + return __ptr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) +{ + return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) size_t msg_data_left(struct msghdr *msg) +{ + return iov_iter_count(&msg->msg_iter); +} + + + + + + + +struct ucred { + __u32 pid; + __u32 uid; + __u32 gid; +}; +# 366 "./include/linux/socket.h" +extern int move_addr_to_kernel(void *uaddr, int ulen, struct __kernel_sockaddr_storage *kaddr); +extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); + +struct timespec64; +struct __kernel_timespec; +struct old_timespec32; + +struct scm_timestamping_internal { + struct timespec64 ts[3]; +}; + +extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss); +extern void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss); + + + + +extern long __sys_recvmsg(int fd, struct user_msghdr *msg, + unsigned int flags, bool forbid_cmsg_compat); +extern long __sys_sendmsg(int fd, struct user_msghdr *msg, + unsigned int flags, bool forbid_cmsg_compat); +extern int __sys_recvmmsg(int fd, struct mmsghdr *mmsg, + unsigned int vlen, unsigned int flags, + struct __kernel_timespec *timeout, + struct old_timespec32 *timeout32); +extern int __sys_sendmmsg(int fd, struct mmsghdr *mmsg, + unsigned int vlen, unsigned int flags, + bool forbid_cmsg_compat); +extern long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg, + unsigned int flags); +extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg, + struct user_msghdr *umsg, + struct sockaddr *uaddr, + unsigned int flags); +extern int sendmsg_copy_msghdr(struct msghdr *msg, + struct 
user_msghdr *umsg, unsigned flags, + struct iovec **iov); +extern int recvmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr *umsg, unsigned flags, + struct sockaddr **uaddr, + struct iovec **iov); +extern int __copy_msghdr_from_user(struct msghdr *kmsg, + struct user_msghdr *umsg, + struct sockaddr **save_addr, + struct iovec **uiov, size_t *nsegs); + + +extern int __sys_recvfrom(int fd, void *ubuf, size_t size, + unsigned int flags, struct sockaddr *addr, + int *addr_len); +extern int __sys_sendto(int fd, void *buff, size_t len, + unsigned int flags, struct sockaddr *addr, + int addr_len); +extern int __sys_accept4_file(struct file *file, unsigned file_flags, + struct sockaddr *upeer_sockaddr, + int *upeer_addrlen, int flags, + unsigned long nofile); +extern int __sys_accept4(int fd, struct sockaddr *upeer_sockaddr, + int *upeer_addrlen, int flags); +extern int __sys_socket(int family, int type, int protocol); +extern int __sys_bind(int fd, struct sockaddr *umyaddr, int addrlen); +extern int __sys_connect_file(struct file *file, struct __kernel_sockaddr_storage *addr, + int addrlen, int file_flags); +extern int __sys_connect(int fd, struct sockaddr *uservaddr, + int addrlen); +extern int __sys_listen(int fd, int backlog); +extern int __sys_getsockname(int fd, struct sockaddr *usockaddr, + int *usockaddr_len); +extern int __sys_getpeername(int fd, struct sockaddr *usockaddr, + int *usockaddr_len); +extern int __sys_socketpair(int family, int type, int protocol, + int *usockvec); +extern int __sys_shutdown(int fd, int how); + +extern struct ns_common *get_net_ns(struct ns_common *ns); +# 16 "./include/linux/compat.h" 2 +# 1 "./include/uapi/linux/if.h" 1 +# 23 "./include/uapi/linux/if.h" +# 1 "./include/uapi/linux/libc-compat.h" 1 +# 24 "./include/uapi/linux/if.h" 2 +# 37 "./include/uapi/linux/if.h" +# 1 "./include/uapi/linux/hdlc/ioctl.h" 1 +# 40 "./include/uapi/linux/hdlc/ioctl.h" +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + unsigned short loopback; +} sync_serial_settings; + +typedef struct { + unsigned int clock_rate; + unsigned int clock_type; + unsigned short loopback; + unsigned int slot_map; +} te1_settings; + +typedef struct { + unsigned short encoding; + unsigned short parity; +} raw_hdlc_proto; + +typedef struct { + unsigned int t391; + unsigned int t392; + unsigned int n391; + unsigned int n392; + unsigned int n393; + unsigned short lmi; + unsigned short dce; +} fr_proto; + +typedef struct { + unsigned int dlci; +} fr_proto_pvc; + +typedef struct { + unsigned int dlci; + char master[16]; +}fr_proto_pvc_info; + +typedef struct { + unsigned int interval; + unsigned int timeout; +} cisco_proto; + +typedef struct { + unsigned short dce; + unsigned int modulo; + unsigned int window; + unsigned int t1; + unsigned int t2; + unsigned int n2; +} x25_hdlc_proto; +# 38 "./include/uapi/linux/if.h" 2 +# 82 "./include/uapi/linux/if.h" +enum net_device_flags { + + + IFF_UP = 1<<0, + IFF_BROADCAST = 1<<1, + IFF_DEBUG = 1<<2, + IFF_LOOPBACK = 1<<3, + IFF_POINTOPOINT = 1<<4, + IFF_NOTRAILERS = 1<<5, + IFF_RUNNING = 1<<6, + IFF_NOARP = 1<<7, + IFF_PROMISC = 1<<8, + IFF_ALLMULTI = 1<<9, + IFF_MASTER = 1<<10, + IFF_SLAVE = 1<<11, + IFF_MULTICAST = 1<<12, + IFF_PORTSEL = 1<<13, + IFF_AUTOMEDIA = 1<<14, + IFF_DYNAMIC = 1<<15, + + + IFF_LOWER_UP = 1<<16, + IFF_DORMANT = 1<<17, + IFF_ECHO = 1<<18, + +}; +# 167 "./include/uapi/linux/if.h" +enum { + IF_OPER_UNKNOWN, + IF_OPER_NOTPRESENT, + IF_OPER_DOWN, + IF_OPER_LOWERLAYERDOWN, + IF_OPER_TESTING, + IF_OPER_DORMANT, + 
IF_OPER_UP, +}; + + +enum { + IF_LINK_MODE_DEFAULT, + IF_LINK_MODE_DORMANT, + IF_LINK_MODE_TESTING, +}; +# 196 "./include/uapi/linux/if.h" +struct ifmap { + unsigned long mem_start; + unsigned long mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; + +}; + + +struct if_settings { + unsigned int type; + unsigned int size; + union { + + raw_hdlc_proto *raw_hdlc; + cisco_proto *cisco; + fr_proto *fr; + fr_proto_pvc *fr_pvc; + fr_proto_pvc_info *fr_pvc_info; + x25_hdlc_proto *x25; + + + sync_serial_settings *sync; + te1_settings *te1; + } ifs_ifsu; +}; +# 234 "./include/uapi/linux/if.h" +struct ifreq { + + union + { + char ifrn_name[16]; + } ifr_ifrn; + + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + int ifru_ivalue; + int ifru_mtu; + struct ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + void * ifru_data; + struct if_settings ifru_settings; + } ifr_ifru; +}; +# 286 "./include/uapi/linux/if.h" +struct ifconf { + int ifc_len; + union { + char *ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +}; +# 17 "./include/linux/compat.h" 2 + + + + + +# 1 "./arch/x86/include/asm/compat.h" 1 +# 12 "./arch/x86/include/asm/compat.h" +# 1 "./arch/x86/include/asm/user32.h" 1 + + + + + + + +struct user_i387_ia32_struct { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + u32 st_space[20]; +}; + + +struct user32_fxsr_struct { + unsigned short cwd; + unsigned short swd; + unsigned short twd; + unsigned short fop; + int fip; + int fcs; + int foo; + int fos; + int mxcsr; + int reserved; + int st_space[32]; + int xmm_space[32]; + int padding[56]; +}; + +struct user_regs_struct32 { + __u32 ebx, ecx, edx, esi, edi, ebp, eax; + unsigned short ds, __ds, es, __es; + unsigned short fs, __fs, gs, __gs; + __u32 orig_eax, eip; + unsigned short cs, __cs; + __u32 eflags, esp; + unsigned short ss, __ss; +}; + +struct user32 { + struct user_regs_struct32 regs; + int u_fpvalid; + + struct user_i387_ia32_struct i387; + + __u32 u_tsize; + __u32 u_dsize; + __u32 u_ssize; + __u32 start_code; + __u32 start_stack; + + + + __u32 signal; + int reserved; + __u32 u_ar0; + + __u32 u_fpstate; + __u32 magic; + char u_comm[32]; + int u_debugreg[8]; +}; +# 13 "./arch/x86/include/asm/compat.h" 2 + + +# 1 "./include/asm-generic/compat.h" 1 + + + + + +typedef u32 compat_size_t; +typedef s32 compat_ssize_t; +typedef s32 compat_clock_t; +typedef s32 compat_pid_t; +typedef u32 compat_ino_t; +typedef s32 compat_off_t; +typedef s64 compat_loff_t; +typedef s32 compat_daddr_t; +typedef s32 compat_timer_t; +typedef s32 compat_key_t; +typedef s16 compat_short_t; +typedef s32 compat_int_t; +typedef s32 compat_long_t; +typedef u16 compat_ushort_t; +typedef u32 compat_uint_t; +typedef u32 compat_ulong_t; +typedef u32 compat_uptr_t; +typedef u32 compat_aio_context_t; +# 16 "./arch/x86/include/asm/compat.h" 2 + + + + +typedef u16 __compat_uid_t; +typedef u16 __compat_gid_t; +typedef u32 __compat_uid32_t; +typedef u32 __compat_gid32_t; +typedef u16 compat_mode_t; +typedef u16 compat_dev_t; +typedef u16 compat_nlink_t; +typedef u16 compat_ipc_pid_t; +typedef u32 compat_caddr_t; +typedef __kernel_fsid_t compat_fsid_t; +typedef s64 __attribute__((aligned(4))) compat_s64; +typedef u64 __attribute__((aligned(4))) compat_u64; + +struct compat_stat { + compat_dev_t st_dev; + u16 __pad1; + compat_ino_t st_ino; + compat_mode_t st_mode; 
+ compat_nlink_t st_nlink; + __compat_uid_t st_uid; + __compat_gid_t st_gid; + compat_dev_t st_rdev; + u16 __pad2; + u32 st_size; + u32 st_blksize; + u32 st_blocks; + u32 st_atime; + u32 st_atime_nsec; + u32 st_mtime; + u32 st_mtime_nsec; + u32 st_ctime; + u32 st_ctime_nsec; + u32 __unused4; + u32 __unused5; +}; + +struct compat_flock { + short l_type; + short l_whence; + compat_off_t l_start; + compat_off_t l_len; + compat_pid_t l_pid; +}; +# 72 "./arch/x86/include/asm/compat.h" +struct compat_flock64 { + short l_type; + short l_whence; + compat_loff_t l_start; + compat_loff_t l_len; + compat_pid_t l_pid; +} __attribute__((packed)); + +struct compat_statfs { + int f_type; + int f_bsize; + int f_blocks; + int f_bfree; + int f_bavail; + int f_files; + int f_ffree; + compat_fsid_t f_fsid; + int f_namelen; + int f_frsize; + int f_flags; + int f_spare[4]; +}; + + + +typedef u32 compat_old_sigset_t; + + + + +typedef u32 compat_sigset_word; + + + +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + unsigned short mode; + unsigned short __pad1; + unsigned short seq; + unsigned short __pad2; + compat_ulong_t unused1; + compat_ulong_t unused2; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_ulong_t sem_otime; + compat_ulong_t sem_otime_high; + compat_ulong_t sem_ctime; + compat_ulong_t sem_ctime_high; + compat_ulong_t sem_nsems; + compat_ulong_t __unused3; + compat_ulong_t __unused4; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; + compat_ulong_t msg_stime; + compat_ulong_t msg_stime_high; + compat_ulong_t msg_rtime; + compat_ulong_t msg_rtime_high; + compat_ulong_t msg_ctime; + compat_ulong_t msg_ctime_high; + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_size_t shm_segsz; + compat_ulong_t shm_atime; + compat_ulong_t shm_atime_high; + compat_ulong_t shm_dtime; + compat_ulong_t shm_dtime_high; + compat_ulong_t shm_ctime; + compat_ulong_t shm_ctime_high; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + + + + +typedef struct user_regs_struct compat_elf_gregset_t; +# 180 "./arch/x86/include/asm/compat.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *arch_compat_alloc_user_space(long len) +{ + compat_uptr_t sp; + + if (test_ti_thread_flag(((struct thread_info *)get_current()), 17)) { + sp = ({ unsigned long __ptr = (unsigned long)task_stack_page(get_current()); __ptr += (((1UL) << 12) << (2 + 1)) - 0; ((struct pt_regs *)__ptr) - 1; })->sp; + } else { + + sp = ({ unsigned long __ptr = (unsigned long)task_stack_page(get_current()); __ptr += (((1UL) << 12) << (2 + 1)) - 0; ((struct pt_regs *)__ptr) - 1; })->sp - 128; + } + + return (void *)((sp - len) & ~((__typeof__(sp - len))((16)-1))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool in_x32_syscall(void) +{ + + if (({ unsigned long __ptr = (unsigned long)task_stack_page(get_current()); __ptr += (((1UL) << 12) << (2 + 1)) - 0; ((struct pt_regs *)__ptr) - 1; })->orig_ax & 0x40000000) + return true; + + return false; +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool in_32bit_syscall(void) +{ + return (1 && ((struct thread_info *)get_current())->status & 0x0002) || in_x32_syscall(); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool in_compat_syscall(void) +{ + return in_32bit_syscall(); +} + + + +struct compat_siginfo; + + +int copy_siginfo_to_user32(struct compat_siginfo *to, + const kernel_siginfo_t *from); +# 23 "./include/linux/compat.h" 2 +# 37 "./include/linux/compat.h" +# 1 "./arch/x86/include/asm/syscall_wrapper.h" 1 +# 9 "./arch/x86/include/asm/syscall_wrapper.h" +struct pt_regs; + +extern long __x64_sys_ni_syscall(const struct pt_regs *regs); +extern long __ia32_sys_ni_syscall(const struct pt_regs *regs); +# 268 "./arch/x86/include/asm/syscall_wrapper.h" +long __x64_sys_getcpu(const struct pt_regs *regs); +long __x64_sys_gettimeofday(const struct pt_regs *regs); +long __x64_sys_time(const struct pt_regs *regs); +# 38 "./include/linux/compat.h" 2 +# 100 "./include/linux/compat.h" +typedef struct compat_sigaltstack { + compat_uptr_t ss_sp; + int ss_flags; + compat_size_t ss_size; +} compat_stack_t; +# 113 "./include/linux/compat.h" +typedef __compat_uid32_t compat_uid_t; +typedef __compat_gid32_t compat_gid_t; + +struct compat_sel_arg_struct; +struct rusage; + +struct old_itimerval32; + +struct compat_tms { + compat_clock_t tms_utime; + compat_clock_t tms_stime; + compat_clock_t tms_cutime; + compat_clock_t tms_cstime; +}; + + + +typedef struct { + compat_sigset_word sig[(64 / 32)]; +} compat_sigset_t; + +int set_compat_user_sigmask(const compat_sigset_t *umask, + size_t sigsetsize); + +struct compat_sigaction { + + compat_uptr_t sa_handler; + compat_ulong_t sa_flags; + + + + + + compat_uptr_t sa_restorer; + + compat_sigset_t sa_mask __attribute__((__packed__)); +}; + +typedef union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +} compat_sigval_t; + +typedef struct compat_siginfo { + int si_signo; + + int si_errno; + int si_code; + + + + + + union { + int _pad[128/sizeof(int) - 3]; + + + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + } _kill; + + + struct { + compat_timer_t _tid; + int _overrun; + compat_sigval_t _sigval; + } _timer; + + + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + compat_sigval_t _sigval; + } _rt; + + + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + int _status; + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + + + + struct { + compat_pid_t _pid; + __compat_uid32_t _uid; + int _status; + compat_s64 _utime; + compat_s64 _stime; + } _sigchld_x32; + + + + struct { + compat_uptr_t _addr; + + + + + + union { + + + + + short int _addr_lsb; + + struct { + char _dummy_bnd[(__alignof__(compat_uptr_t) < sizeof(short) ? sizeof(short) : __alignof__(compat_uptr_t))]; + compat_uptr_t _lower; + compat_uptr_t _upper; + } _addr_bnd; + + struct { + char _dummy_pkey[(__alignof__(compat_uptr_t) < sizeof(short) ? 
sizeof(short) : __alignof__(compat_uptr_t))]; + u32 _pkey; + } _addr_pkey; + }; + } _sigfault; + + + struct { + compat_long_t _band; + int _fd; + } _sigpoll; + + struct { + compat_uptr_t _call_addr; + int _syscall; + unsigned int _arch; + } _sigsys; + } _sifields; +} compat_siginfo_t; + +struct compat_iovec { + compat_uptr_t iov_base; + compat_size_t iov_len; +}; + +struct compat_rlimit { + compat_ulong_t rlim_cur; + compat_ulong_t rlim_max; +}; + +struct compat_rusage { + struct old_timeval32 ru_utime; + struct old_timeval32 ru_stime; + compat_long_t ru_maxrss; + compat_long_t ru_ixrss; + compat_long_t ru_idrss; + compat_long_t ru_isrss; + compat_long_t ru_minflt; + compat_long_t ru_majflt; + compat_long_t ru_nswap; + compat_long_t ru_inblock; + compat_long_t ru_oublock; + compat_long_t ru_msgsnd; + compat_long_t ru_msgrcv; + compat_long_t ru_nsignals; + compat_long_t ru_nvcsw; + compat_long_t ru_nivcsw; +}; + +extern int put_compat_rusage(const struct rusage *, + struct compat_rusage *); + +struct compat_siginfo; +struct __compat_aio_sigset; + +struct compat_dirent { + u32 d_ino; + compat_off_t d_off; + u16 d_reclen; + char d_name[256]; +}; + +struct compat_ustat { + compat_daddr_t f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + + + +typedef struct compat_sigevent { + compat_sigval_t sigev_value; + compat_int_t sigev_signo; + compat_int_t sigev_notify; + union { + compat_int_t _pad[((64/sizeof(int)) - 3)]; + compat_int_t _tid; + + struct { + compat_uptr_t _function; + compat_uptr_t _attribute; + } _sigev_thread; + } _sigev_un; +} compat_sigevent_t; + +struct compat_ifmap { + compat_ulong_t mem_start; + compat_ulong_t mem_end; + unsigned short base_addr; + unsigned char irq; + unsigned char dma; + unsigned char port; +}; + +struct compat_if_settings { + unsigned int type; + unsigned int size; + compat_uptr_t ifs_ifsu; +}; + +struct compat_ifreq { + union { + char ifrn_name[16]; + } ifr_ifrn; + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; + short ifru_flags; + compat_int_t ifru_ivalue; + compat_int_t ifru_mtu; + struct compat_ifmap ifru_map; + char ifru_slave[16]; + char ifru_newname[16]; + compat_caddr_t ifru_data; + struct compat_if_settings ifru_settings; + } ifr_ifru; +}; + +struct compat_ifconf { + compat_int_t ifc_len; + compat_caddr_t ifcbuf; +}; + +struct compat_robust_list { + compat_uptr_t next; +}; + +struct compat_robust_list_head { + struct compat_robust_list list; + compat_long_t futex_offset; + compat_uptr_t list_op_pending; +}; + + +struct compat_old_sigaction { + compat_uptr_t sa_handler; + compat_old_sigset_t sa_mask; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; +}; + + +struct compat_keyctl_kdf_params { + compat_uptr_t hashname; + compat_uptr_t otherinfo; + __u32 otherinfolen; + __u32 __spare[8]; +}; + +struct compat_statfs; +struct compat_statfs64; +struct compat_old_linux_dirent; +struct compat_linux_dirent; +struct linux_dirent64; +struct compat_msghdr; +struct compat_mmsghdr; +struct compat_sysinfo; +struct compat_sysctl_args; +struct compat_kexec_segment; +struct compat_mq_attr; +struct compat_msgbuf; + + + + + +long compat_get_bitmap(unsigned long *mask, const compat_ulong_t *umask, + unsigned long bitmap_size); +long compat_put_bitmap(compat_ulong_t *umask, unsigned long *mask, + unsigned long bitmap_size); +void copy_siginfo_to_external32(struct compat_siginfo *to, + const struct kernel_siginfo *from); 
+int copy_siginfo_from_user32(kernel_siginfo_t *to, + const struct compat_siginfo *from); +int __copy_siginfo_to_user32(struct compat_siginfo *to, + const kernel_siginfo_t *from); + + + +int get_compat_sigevent(struct sigevent *event, + const struct compat_sigevent *u_event); + +extern int get_compat_sigset(sigset_t *set, const compat_sigset_t *compat); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +put_compat_sigset(compat_sigset_t *compat, const sigset_t *set, + unsigned int size) +{ +# 441 "./include/linux/compat.h" + return copy_to_user(compat, set, size) ? -14 : 0; + +} + +extern int compat_ptrace_request(struct task_struct *child, + compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, + compat_ulong_t addr, compat_ulong_t data); + +struct epoll_event; + +extern ssize_t compat_rw_copy_check_uvector(int type, + const struct compat_iovec *uvector, + unsigned long nr_segs, + unsigned long fast_segs, struct iovec *fast_pointer, + struct iovec **ret_pointer); + +extern void *compat_alloc_user_space(unsigned long len); + +int compat_restore_altstack(const compat_stack_t *uss); +int __compat_save_altstack(compat_stack_t *, unsigned long); +# 908 "./include/linux/compat.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct old_timeval32 ns_to_old_timeval32(s64 nsec) +{ + struct __kernel_old_timeval tv; + struct old_timeval32 ctv; + + tv = ns_to_kernel_old_timeval(nsec); + ctv.tv_sec = tv.tv_sec; + ctv.tv_usec = tv.tv_usec; + + return ctv; +} + + + + + + + +int kcompat_sys_statfs64(const char * pathname, compat_size_t sz, + struct compat_statfs64 * buf); +int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, + struct compat_statfs64 * buf); +# 947 "./include/linux/compat.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *compat_ptr(compat_uptr_t uptr) +{ + return (void *)(unsigned long)uptr; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) compat_uptr_t ptr_to_compat(void *uptr) +{ + return (u32)(unsigned long)uptr; +} +# 82 "./arch/x86/include/asm/ftrace.h" 2 +# 92 "./arch/x86/include/asm/ftrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_trace_is_compat_syscall(struct pt_regs *regs) +{ + return in_32bit_syscall(); +} +# 22 "./include/linux/ftrace.h" 2 +# 45 "./include/linux/ftrace.h" +void trace_init(void); +void early_trace_init(void); + + + + + +struct module; +struct ftrace_hash; +struct ftrace_direct_func; + + + +const char * +ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym); +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported); +# 82 "./include/linux/ftrace.h" +extern int ftrace_enabled; +extern int +ftrace_enable_sysctl(struct ctl_table *table, int write, + void *buffer, size_t *lenp, + loff_t *ppos); + +struct ftrace_ops; + +typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs); + +ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); +# 151 "./include/linux/ftrace.h" +enum { + 
FTRACE_OPS_FL_ENABLED = ((((1UL))) << (0)), + FTRACE_OPS_FL_DYNAMIC = ((((1UL))) << (1)), + FTRACE_OPS_FL_SAVE_REGS = ((((1UL))) << (2)), + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = ((((1UL))) << (3)), + FTRACE_OPS_FL_RECURSION_SAFE = ((((1UL))) << (4)), + FTRACE_OPS_FL_STUB = ((((1UL))) << (5)), + FTRACE_OPS_FL_INITIALIZED = ((((1UL))) << (6)), + FTRACE_OPS_FL_DELETED = ((((1UL))) << (7)), + FTRACE_OPS_FL_ADDING = ((((1UL))) << (8)), + FTRACE_OPS_FL_REMOVING = ((((1UL))) << (9)), + FTRACE_OPS_FL_MODIFYING = ((((1UL))) << (10)), + FTRACE_OPS_FL_ALLOC_TRAMP = ((((1UL))) << (11)), + FTRACE_OPS_FL_IPMODIFY = ((((1UL))) << (12)), + FTRACE_OPS_FL_PID = ((((1UL))) << (13)), + FTRACE_OPS_FL_RCU = ((((1UL))) << (14)), + FTRACE_OPS_FL_TRACE_ARRAY = ((((1UL))) << (15)), + FTRACE_OPS_FL_PERMANENT = ((((1UL))) << (16)), + FTRACE_OPS_FL_DIRECT = ((((1UL))) << (17)), +}; + + + +struct ftrace_ops_hash { + struct ftrace_hash *notrace_hash; + struct ftrace_hash *filter_hash; + struct mutex regex_lock; +}; + +void ftrace_free_init_mem(void); +void ftrace_free_mem(struct module *mod, void *start, void *end); +# 198 "./include/linux/ftrace.h" +struct ftrace_ops { + ftrace_func_t func; + struct ftrace_ops *next; + unsigned long flags; + void *private; + ftrace_func_t saved_func; + + struct ftrace_ops_hash local_hash; + struct ftrace_ops_hash *func_hash; + struct ftrace_ops_hash old_hash; + unsigned long trampoline; + unsigned long trampoline_size; + +}; + +extern struct ftrace_ops *ftrace_ops_list; +extern struct ftrace_ops ftrace_list_end; +# 239 "./include/linux/ftrace.h" +enum ftrace_tracing_type_t { + FTRACE_TYPE_ENTER = 0, + FTRACE_TYPE_RETURN, +}; + + +extern enum ftrace_tracing_type_t ftrace_tracing_type; +# 254 "./include/linux/ftrace.h" +int register_ftrace_function(struct ftrace_ops *ops); +int unregister_ftrace_function(struct ftrace_ops *ops); + +extern void ftrace_stub(unsigned long a0, unsigned long a1, + struct ftrace_ops *op, struct pt_regs *regs); +# 272 "./include/linux/ftrace.h" +struct ftrace_func_entry { + struct hlist_node hlist; + unsigned long ip; + unsigned long direct; +}; + +struct dyn_ftrace; + + +extern int ftrace_direct_func_count; +int register_ftrace_direct(unsigned long ip, unsigned long addr); +int unregister_ftrace_direct(unsigned long ip, unsigned long addr); +int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr); +struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr); +int ftrace_modify_direct_caller(struct ftrace_func_entry *entry, + struct dyn_ftrace *rec, + unsigned long old_addr, + unsigned long new_addr); +unsigned long ftrace_find_rec_direct(unsigned long ip); +# 343 "./include/linux/ftrace.h" +extern int stack_tracer_enabled; + +int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_disable_stack_tracer; extern __attribute__((section(".data..percpu" ""))) __typeof__(int) disable_stack_tracer; +# 362 "./include/linux/ftrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void stack_tracer_disable(void) +{ + + if (0) + ({ int __ret_warn_on = !!(!preempt_count() || !({ unsigned long _flags; do { ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _flags = arch_local_save_flags(); } while (0); ({ ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); 
arch_irqs_disabled_flags(_flags); }); })); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1003)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/ftrace.h"), "i" (366), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1004)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1005)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + do { do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(disable_stack_tracer)) { case 1: do { typedef typeof((disable_stack_tracer)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((disable_stack_tracer))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((disable_stack_tracer)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((disable_stack_tracer))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((disable_stack_tracer)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((disable_stack_tracer))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((disable_stack_tracer)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((disable_stack_tracer))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void stack_tracer_enable(void) +{ + if (0) + ({ int __ret_warn_on = !!(!preempt_count() || !({ unsigned long _flags; do { ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _flags = arch_local_save_flags(); } while (0); ({ ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(_flags); }); })); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1006)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/ftrace.h"), "i" (379), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1007)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1008)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + do { do { const void *__vpp_verify = (typeof((&(disable_stack_tracer)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(disable_stack_tracer)) { case 1: do { typedef typeof((disable_stack_tracer)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(disable_stack_tracer))(1)) && ((-(typeof(disable_stack_tracer))(1)) == 1 || (-(typeof(disable_stack_tracer))(1)) == -1)) ? 
(int)(-(typeof(disable_stack_tracer))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(disable_stack_tracer))(1)); (void)pao_tmp__; } switch (sizeof((disable_stack_tracer))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "qi" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "re" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((disable_stack_tracer)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(disable_stack_tracer))(1)) && ((-(typeof(disable_stack_tracer))(1)) == 1 || (-(typeof(disable_stack_tracer))(1)) == -1)) ? 
(int)(-(typeof(disable_stack_tracer))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(disable_stack_tracer))(1)); (void)pao_tmp__; } switch (sizeof((disable_stack_tracer))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "qi" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "re" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((disable_stack_tracer)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(disable_stack_tracer))(1)) && ((-(typeof(disable_stack_tracer))(1)) == 1 || (-(typeof(disable_stack_tracer))(1)) == -1)) ? 
(int)(-(typeof(disable_stack_tracer))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(disable_stack_tracer))(1)); (void)pao_tmp__; } switch (sizeof((disable_stack_tracer))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "qi" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "re" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((disable_stack_tracer)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(disable_stack_tracer))(1)) && ((-(typeof(disable_stack_tracer))(1)) == 1 || (-(typeof(disable_stack_tracer))(1)) == -1)) ? 
(int)(-(typeof(disable_stack_tracer))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(disable_stack_tracer))(1)); (void)pao_tmp__; } switch (sizeof((disable_stack_tracer))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "qi" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "ri" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((disable_stack_tracer)) : "re" ((pao_T__)(-(typeof(disable_stack_tracer))(1)))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + + + + + + + +int ftrace_arch_code_modify_prepare(void); +int ftrace_arch_code_modify_post_process(void); + +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN, + FTRACE_BUG_INIT, + FTRACE_BUG_NOP, + FTRACE_BUG_CALL, + FTRACE_BUG_UPDATE, +}; +extern enum ftrace_bug_type ftrace_bug_type; + + + + + +extern const void *ftrace_expected; + +void ftrace_bug(int err, struct dyn_ftrace *rec); + +struct seq_file; + +extern int ftrace_text_reserved(const void *start, const void *end); + +struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr); + +bool is_ftrace_trampoline(unsigned long addr); +# 437 "./include/linux/ftrace.h" +enum { + FTRACE_FL_ENABLED = (1UL << 31), + FTRACE_FL_REGS = (1UL << 30), + FTRACE_FL_REGS_EN = (1UL << 29), + FTRACE_FL_TRAMP = (1UL << 28), + FTRACE_FL_TRAMP_EN = (1UL << 27), + FTRACE_FL_IPMODIFY = (1UL << 26), + FTRACE_FL_DISABLED = (1UL << 25), + FTRACE_FL_DIRECT = (1UL << 24), + FTRACE_FL_DIRECT_EN = (1UL << 23), +}; +# 457 "./include/linux/ftrace.h" +struct dyn_ftrace { + unsigned long ip; + unsigned long flags; + struct dyn_arch_ftrace arch; +}; + +int ftrace_force_update(void); +int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, + int remove, int reset); +int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +void ftrace_set_global_filter(unsigned char *buf, int len, int reset); +void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); +void ftrace_free_filter(struct ftrace_ops *ops); +void ftrace_ops_set_global_filter(struct ftrace_ops *ops); + +enum { + FTRACE_UPDATE_CALLS = (1 << 0), + FTRACE_DISABLE_CALLS = (1 << 1), + FTRACE_UPDATE_TRACE_FUNC = (1 << 2), + FTRACE_START_FUNC_RET = (1 << 3), + FTRACE_STOP_FUNC_RET = (1 << 4), + FTRACE_MAY_SLEEP = (1 << 
5), +}; +# 495 "./include/linux/ftrace.h" +enum { + FTRACE_UPDATE_IGNORE, + FTRACE_UPDATE_MAKE_CALL, + FTRACE_UPDATE_MODIFY_CALL, + FTRACE_UPDATE_MAKE_NOP, +}; + +enum { + FTRACE_ITER_FILTER = (1 << 0), + FTRACE_ITER_NOTRACE = (1 << 1), + FTRACE_ITER_PRINTALL = (1 << 2), + FTRACE_ITER_DO_PROBES = (1 << 3), + FTRACE_ITER_PROBE = (1 << 4), + FTRACE_ITER_MOD = (1 << 5), + FTRACE_ITER_ENABLED = (1 << 6), +}; + +void arch_ftrace_update_code(int command); +void arch_ftrace_update_trampoline(struct ftrace_ops *ops); +void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec); +void arch_ftrace_trampoline_free(struct ftrace_ops *ops); + +struct ftrace_rec_iter; + +struct ftrace_rec_iter *ftrace_rec_iter_start(void); +struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); +struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); + + + + + + + +int ftrace_update_record(struct dyn_ftrace *rec, bool enable); +int ftrace_test_record(struct dyn_ftrace *rec, bool enable); +void ftrace_run_stop_machine(int command); +unsigned long ftrace_location(unsigned long ip); +unsigned long ftrace_location_range(unsigned long start, unsigned long end); +unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec); +unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec); + +extern ftrace_func_t ftrace_trace_function; + +int ftrace_regex_open(struct ftrace_ops *ops, int flag, + struct inode *inode, struct file *file); +ssize_t ftrace_filter_write(struct file *file, const char *ubuf, + size_t cnt, loff_t *ppos); +ssize_t ftrace_notrace_write(struct file *file, const char *ubuf, + size_t cnt, loff_t *ppos); +int ftrace_regex_release(struct inode *inode, struct file *file); + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) +ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable); + + +extern int ftrace_ip_converted(unsigned long ip); +extern int ftrace_dyn_arch_init(void); +extern void ftrace_replace_code(int enable); +extern int ftrace_update_ftrace_func(ftrace_func_t func); +extern void ftrace_caller(void); +extern void ftrace_regs_caller(void); +extern void ftrace_call(void); +extern void ftrace_regs_call(void); +extern void mcount_call(void); + +void ftrace_modify_all_code(int command); +# 590 "./include/linux/ftrace.h" +extern void ftrace_graph_caller(void); +extern int ftrace_enable_ftrace_graph_caller(void); +extern int ftrace_disable_ftrace_graph_caller(void); +# 619 "./include/linux/ftrace.h" +extern int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr); +# 645 "./include/linux/ftrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) +{ + return ftrace_make_nop(mod, rec, ((unsigned long)(__fentry__))); +} +# 671 "./include/linux/ftrace.h" +extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); +# 695 "./include/linux/ftrace.h" +extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr); +# 707 "./include/linux/ftrace.h" +extern int ftrace_arch_read_dyn_info(char *buf, int size); + +extern int skip_trace(unsigned long ip); +extern void ftrace_module_init(struct module *mod); +extern void ftrace_module_enable(struct module *mod); +extern void ftrace_release_mod(struct module *mod); + +extern void ftrace_disable_daemon(void); +extern void 
ftrace_enable_daemon(void); +# 760 "./include/linux/ftrace.h" +void ftrace_kill(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void tracer_disable(void) +{ + + ftrace_enabled = 0; + +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __ftrace_enabled_save(void) +{ + + int saved_ftrace_enabled = ftrace_enabled; + ftrace_enabled = 0; + return saved_ftrace_enabled; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __ftrace_enabled_restore(int enabled) +{ + + ftrace_enabled = enabled; + +} +# 814 "./include/linux/ftrace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_lock_parent_ip(void) +{ + unsigned long addr = ((unsigned long)__builtin_return_address(0)); + + if (!in_lock_functions(addr)) + return addr; + addr = ((unsigned long)0UL); + if (!in_lock_functions(addr)) + return addr; + return ((unsigned long)0UL); +} +# 839 "./include/linux/ftrace.h" +extern void ftrace_init(void); +# 854 "./include/linux/ftrace.h" +struct ftrace_graph_ent { + unsigned long func; + int depth; +} __attribute__((__packed__)); + + + + + + +struct ftrace_graph_ret { + unsigned long func; + + unsigned long overrun; + unsigned long long calltime; + unsigned long long rettime; + int depth; +} __attribute__((__packed__)); + + +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); + +extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); + + + +struct fgraph_ops { + trace_func_graph_ent_t entryfunc; + trace_func_graph_ret_t retfunc; +}; + + + + + + +struct ftrace_ret_stack { + unsigned long ret; + unsigned long func; + unsigned long long calltime; + + unsigned long long subtime; + + + + + + unsigned long *retp; + +}; + + + + + + +extern void return_to_handler(void); + +extern int +function_graph_enter(unsigned long ret, unsigned long func, + unsigned long frame_pointer, unsigned long *retp); + +struct ftrace_ret_stack * +ftrace_graph_get_ret_stack(struct task_struct *task, int idx); + +unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, + unsigned long ret, unsigned long *retp); +# 933 "./include/linux/ftrace.h" +extern int register_ftrace_graph(struct fgraph_ops *ops); +extern void unregister_ftrace_graph(struct fgraph_ops *ops); + +extern bool ftrace_graph_is_dead(void); +extern void ftrace_graph_stop(void); + + +extern trace_func_graph_ret_t ftrace_graph_return; +extern trace_func_graph_ent_t ftrace_graph_entry; + +extern void ftrace_graph_init_task(struct task_struct *t); +extern void ftrace_graph_exit_task(struct task_struct *t); +extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pause_graph_tracing(void) +{ + atomic_inc(&get_current()->tracing_graph_pause); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void unpause_graph_tracing(void) +{ + atomic_dec(&get_current()->tracing_graph_pause); +} +# 982 "./include/linux/ftrace.h" +enum { + TSK_TRACE_FL_TRACE_BIT = 0, + TSK_TRACE_FL_GRAPH_BIT = 1, +}; +enum { + TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT, + 
TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT, +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_tsk_trace_trace(struct task_struct *tsk) +{ + set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_tsk_trace_trace(struct task_struct *tsk) +{ + clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_tsk_trace_trace(struct task_struct *tsk) +{ + return tsk->trace & TSK_TRACE_FL_TRACE; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_tsk_trace_graph(struct task_struct *tsk) +{ + set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_tsk_trace_graph(struct task_struct *tsk) +{ + clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int test_tsk_trace_graph(struct task_struct *tsk) +{ + return tsk->trace & TSK_TRACE_FL_GRAPH; +} + +enum ftrace_dump_mode; + +extern enum ftrace_dump_mode ftrace_dump_on_oops; +extern int tracepoint_printk; + +extern void disable_trace_on_warning(void); +extern int __disable_trace_on_warning; + +int tracepoint_printk_sysctl(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); + + + + + + + +unsigned long arch_syscall_addr(int nr); +# 50 "./include/linux/perf_event.h" 2 +# 1 "./include/linux/cpu.h" 1 +# 17 "./include/linux/cpu.h" +# 1 "./include/linux/node.h" 1 +# 18 "./include/linux/node.h" +# 1 "./include/linux/device.h" 1 +# 15 "./include/linux/device.h" +# 1 "./include/linux/dev_printk.h" 1 +# 22 "./include/linux/dev_printk.h" +struct device; + + + +__attribute__((__format__(printf, 3, 0))) __attribute__((__cold__)) +int dev_vprintk_emit(int level, const struct device *dev, + const char *fmt, va_list args); +__attribute__((__format__(printf, 3, 4))) __attribute__((__cold__)) +int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); + +__attribute__((__format__(printf, 3, 4))) __attribute__((__cold__)) +void dev_printk(const char *level, const struct device *dev, + const char *fmt, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void _dev_emerg(const struct device *dev, const char *fmt, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void _dev_alert(const struct device *dev, const char *fmt, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void _dev_crit(const struct device *dev, const char *fmt, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void _dev_err(const struct device *dev, const char *fmt, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void _dev_warn(const struct device *dev, const char *fmt, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void _dev_notice(const struct device *dev, const char *fmt, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void _dev_info(const struct device *dev, const char *fmt, ...); +# 16 "./include/linux/device.h" 2 + + +# 1 "./include/linux/klist.h" 1 +# 
17 "./include/linux/klist.h" +struct klist_node; +struct klist { + spinlock_t k_lock; + struct list_head k_list; + void (*get)(struct klist_node *); + void (*put)(struct klist_node *); +} __attribute__ ((aligned (sizeof(void *)))); +# 34 "./include/linux/klist.h" +extern void klist_init(struct klist *k, void (*get)(struct klist_node *), + void (*put)(struct klist_node *)); + +struct klist_node { + void *n_klist; + struct list_head n_node; + struct kref n_ref; +}; + +extern void klist_add_tail(struct klist_node *n, struct klist *k); +extern void klist_add_head(struct klist_node *n, struct klist *k); +extern void klist_add_behind(struct klist_node *n, struct klist_node *pos); +extern void klist_add_before(struct klist_node *n, struct klist_node *pos); + +extern void klist_del(struct klist_node *n); +extern void klist_remove(struct klist_node *n); + +extern int klist_node_attached(struct klist_node *n); + + +struct klist_iter { + struct klist *i_klist; + struct klist_node *i_cur; +}; + + +extern void klist_iter_init(struct klist *k, struct klist_iter *i); +extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, + struct klist_node *n); +extern void klist_iter_exit(struct klist_iter *i); +extern struct klist_node *klist_prev(struct klist_iter *i); +extern struct klist_node *klist_next(struct klist_iter *i); +# 19 "./include/linux/device.h" 2 + + + + + +# 1 "./include/linux/pm.h" 1 +# 22 "./include/linux/pm.h" +extern void (*pm_power_off)(void); +extern void (*pm_power_off_prepare)(void); + +struct device; + +extern void pm_vt_switch_required(struct device *dev, bool required); +extern void pm_vt_switch_unregister(struct device *dev); +# 42 "./include/linux/pm.h" +struct device; + + +extern const char power_group_name[]; + + + + +typedef struct pm_message { + int event; +} pm_message_t; +# 278 "./include/linux/pm.h" +struct dev_pm_ops { + int (*prepare)(struct device *dev); + void (*complete)(struct device *dev); + int (*suspend)(struct device *dev); + int (*resume)(struct device *dev); + int (*freeze)(struct device *dev); + int (*thaw)(struct device *dev); + int (*poweroff)(struct device *dev); + int (*restore)(struct device *dev); + int (*suspend_late)(struct device *dev); + int (*resume_early)(struct device *dev); + int (*freeze_late)(struct device *dev); + int (*thaw_early)(struct device *dev); + int (*poweroff_late)(struct device *dev); + int (*restore_early)(struct device *dev); + int (*suspend_noirq)(struct device *dev); + int (*resume_noirq)(struct device *dev); + int (*freeze_noirq)(struct device *dev); + int (*thaw_noirq)(struct device *dev); + int (*poweroff_noirq)(struct device *dev); + int (*restore_noirq)(struct device *dev); + int (*runtime_suspend)(struct device *dev); + int (*runtime_resume)(struct device *dev); + int (*runtime_idle)(struct device *dev); +}; +# 496 "./include/linux/pm.h" +enum rpm_status { + RPM_ACTIVE = 0, + RPM_RESUMING, + RPM_SUSPENDED, + RPM_SUSPENDING, +}; +# 518 "./include/linux/pm.h" +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE, + RPM_REQ_SUSPEND, + RPM_REQ_AUTOSUSPEND, + RPM_REQ_RESUME, +}; + +struct wakeup_source; +struct wake_irq; +struct pm_domain_data; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; + + struct list_head clock_list; + + + struct pm_domain_data *domain_data; + +}; +# 559 "./include/linux/pm.h" +struct dev_pm_info { + pm_message_t power_state; + unsigned int can_wakeup:1; + unsigned int async_suspend:1; + bool in_dpm_list:1; + bool is_prepared:1; + bool is_suspended:1; + bool 
is_noirq_suspended:1; + bool is_late_suspended:1; + bool no_pm:1; + bool early_init:1; + bool direct_complete:1; + u32 driver_flags; + spinlock_t lock; + + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path:1; + bool syscore:1; + bool no_pm_callbacks:1; + unsigned int must_resume:1; + unsigned int may_skip_resume:1; + + + + + struct hrtimer suspend_timer; + unsigned long timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth:3; + unsigned int idle_notification:1; + unsigned int request_pending:1; + unsigned int deferred_resume:1; + unsigned int runtime_auto:1; + bool ignore_children:1; + unsigned int no_callbacks:1; + unsigned int irq_safe:1; + unsigned int use_autosuspend:1; + unsigned int timer_autosuspends:1; + unsigned int memalloc_noio:1; + unsigned int links_count; + enum rpm_request request; + enum rpm_status runtime_status; + int runtime_error; + int autosuspend_delay; + u64 last_busy; + u64 active_time; + u64 suspended_time; + u64 accounting_timestamp; + + struct pm_subsys_data *subsys_data; + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; +}; + +extern int dev_pm_get_subsys_data(struct device *dev); +extern void dev_pm_put_subsys_data(struct device *dev); +# 636 "./include/linux/pm.h" +struct dev_pm_domain { + struct dev_pm_ops ops; + int (*start)(struct device *dev); + void (*detach)(struct device *dev, bool power_off); + int (*activate)(struct device *dev); + void (*sync)(struct device *dev); + void (*dismiss)(struct device *dev); +}; +# 700 "./include/linux/pm.h" +extern void device_pm_lock(void); +extern void dpm_resume_start(pm_message_t state); +extern void dpm_resume_end(pm_message_t state); +extern void dpm_resume_noirq(pm_message_t state); +extern void dpm_resume_early(pm_message_t state); +extern void dpm_resume(pm_message_t state); +extern void dpm_complete(pm_message_t state); + +extern void device_pm_unlock(void); +extern int dpm_suspend_end(pm_message_t state); +extern int dpm_suspend_start(pm_message_t state); +extern int dpm_suspend_noirq(pm_message_t state); +extern int dpm_suspend_late(pm_message_t state); +extern int dpm_suspend(pm_message_t state); +extern int dpm_prepare(pm_message_t state); + +extern void __suspend_report_result(const char *function, void *fn, int ret); + + + + + + +extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); +extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)); + +extern int pm_generic_prepare(struct device *dev); +extern int pm_generic_suspend_late(struct device *dev); +extern int pm_generic_suspend_noirq(struct device *dev); +extern int pm_generic_suspend(struct device *dev); +extern int pm_generic_resume_early(struct device *dev); +extern int pm_generic_resume_noirq(struct device *dev); +extern int pm_generic_resume(struct device *dev); +extern int pm_generic_freeze_noirq(struct device *dev); +extern int pm_generic_freeze_late(struct device *dev); +extern int pm_generic_freeze(struct device *dev); +extern int pm_generic_thaw_noirq(struct device *dev); +extern int pm_generic_thaw_early(struct device *dev); +extern int pm_generic_thaw(struct device *dev); +extern int pm_generic_restore_noirq(struct device *dev); +extern int pm_generic_restore_early(struct device *dev); +extern int pm_generic_restore(struct device *dev); +extern int pm_generic_poweroff_noirq(struct device 
*dev); +extern int pm_generic_poweroff_late(struct device *dev); +extern int pm_generic_poweroff(struct device *dev); +extern void pm_generic_complete(struct device *dev); + +extern bool dev_pm_skip_resume(struct device *dev); +extern bool dev_pm_skip_suspend(struct device *dev); +# 794 "./include/linux/pm.h" +enum dpm_order { + DPM_ORDER_NONE, + DPM_ORDER_DEV_AFTER_PARENT, + DPM_ORDER_PARENT_BEFORE_DEV, + DPM_ORDER_DEV_LAST, +}; +# 25 "./include/linux/device.h" 2 + + + + +# 1 "./include/linux/device/bus.h" 1 +# 21 "./include/linux/device/bus.h" +struct device_driver; +struct fwnode_handle; +# 82 "./include/linux/device/bus.h" +struct bus_type { + const char *name; + const char *dev_name; + struct device *dev_root; + const struct attribute_group **bus_groups; + const struct attribute_group **dev_groups; + const struct attribute_group **drv_groups; + + int (*match)(struct device *dev, struct device_driver *drv); + int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + int (*probe)(struct device *dev); + void (*sync_state)(struct device *dev); + int (*remove)(struct device *dev); + void (*shutdown)(struct device *dev); + + int (*online)(struct device *dev); + int (*offline)(struct device *dev); + + int (*suspend)(struct device *dev, pm_message_t state); + int (*resume)(struct device *dev); + + int (*num_vf)(struct device *dev); + + int (*dma_configure)(struct device *dev); + + const struct dev_pm_ops *pm; + + const struct iommu_ops *iommu_ops; + + struct subsys_private *p; + struct lock_class_key lock_key; + + bool need_parent_lock; +}; + +extern int __attribute__((__warn_unused_result__)) bus_register(struct bus_type *bus); + +extern void bus_unregister(struct bus_type *bus); + +extern int __attribute__((__warn_unused_result__)) bus_rescan_devices(struct bus_type *bus); + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(struct bus_type *bus, char *buf); + ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); +}; +# 136 "./include/linux/device/bus.h" +extern int __attribute__((__warn_unused_result__)) bus_create_file(struct bus_type *, + struct bus_attribute *); +extern void bus_remove_file(struct bus_type *, struct bus_attribute *); + + +int device_match_name(struct device *dev, const void *name); +int device_match_of_node(struct device *dev, const void *np); +int device_match_fwnode(struct device *dev, const void *fwnode); +int device_match_devt(struct device *dev, const void *pdevt); +int device_match_acpi_dev(struct device *dev, const void *adev); +int device_match_any(struct device *dev, const void *unused); + + +struct subsys_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; +void subsys_dev_iter_init(struct subsys_dev_iter *iter, + struct bus_type *subsys, + struct device *start, + const struct device_type *type); +struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); +void subsys_dev_iter_exit(struct subsys_dev_iter *iter); + +int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, + int (*fn)(struct device *dev, void *data)); +struct device *bus_find_device(struct bus_type *bus, struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device *bus_find_device_by_name(struct bus_type *bus, + struct device *start, + const char *name) +{ + return bus_find_device(bus, start, name, device_match_name); +} + + + + + 
+ + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np) +{ + return bus_find_device(bus, ((void *)0), np, device_match_of_node); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode) +{ + return bus_find_device(bus, ((void *)0), fwnode, device_match_fwnode); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device *bus_find_device_by_devt(struct bus_type *bus, + dev_t devt) +{ + return bus_find_device(bus, ((void *)0), &devt, device_match_devt); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +bus_find_next_device(struct bus_type *bus,struct device *cur) +{ + return bus_find_device(bus, cur, ((void *)0), device_match_any); +} + + +struct acpi_device; + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev) +{ + return bus_find_device(bus, ((void *)0), adev, device_match_acpi_dev); +} +# 249 "./include/linux/device/bus.h" +struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, + struct device *hint); +int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, + void *data, int (*fn)(struct device_driver *, void *)); +void bus_sort_breadthfirst(struct bus_type *bus, + int (*compare)(const struct device *a, + const struct device *b)); + + + + + + +struct notifier_block; + +extern int bus_register_notifier(struct bus_type *bus, + struct notifier_block *nb); +extern int bus_unregister_notifier(struct bus_type *bus, + struct notifier_block *nb); +# 285 "./include/linux/device/bus.h" +extern struct kset *bus_get_kset(struct bus_type *bus); +extern struct klist *bus_get_device_klist(struct bus_type *bus); +# 30 "./include/linux/device.h" 2 +# 1 "./include/linux/device/class.h" 1 +# 22 "./include/linux/device/class.h" +struct device; +struct fwnode_handle; +# 54 "./include/linux/device/class.h" +struct class { + const char *name; + struct module *owner; + + const struct attribute_group **class_groups; + const struct attribute_group **dev_groups; + struct kobject *dev_kobj; + + int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*devnode)(struct device *dev, umode_t *mode); + + void (*class_release)(struct class *class); + void (*dev_release)(struct device *dev); + + int (*shutdown_pre)(struct device *dev); + + const struct kobj_ns_type_operations *ns_type; + const void *(*namespace)(struct device *dev); + + void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid); + + const struct dev_pm_ops *pm; + + struct subsys_private *p; +}; + +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + +extern struct kobject *sysfs_dev_block_kobj; +extern struct kobject *sysfs_dev_char_kobj; +extern int __attribute__((__warn_unused_result__)) __class_register(struct class *class, + struct lock_class_key *key); +extern void class_unregister(struct class *class); +# 99 "./include/linux/device/class.h" +struct class_compat; 
+struct class_compat *class_compat_register(const char *name); +void class_compat_unregister(struct class_compat *cls); +int class_compat_create_link(struct class_compat *cls, struct device *dev, + struct device *device_link); +void class_compat_remove_link(struct class_compat *cls, struct device *dev, + struct device *device_link); + +extern void class_dev_iter_init(struct class_dev_iter *iter, + struct class *class, + struct device *start, + const struct device_type *type); +extern struct device *class_dev_iter_next(struct class_dev_iter *iter); +extern void class_dev_iter_exit(struct class_dev_iter *iter); + +extern int class_for_each_device(struct class *class, struct device *start, + void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *class_find_device(struct class *class, + struct device *start, const void *data, + int (*match)(struct device *, const void *)); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device *class_find_device_by_name(struct class *class, + const char *name) +{ + return class_find_device(class, ((void *)0), name, device_match_name); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +class_find_device_by_of_node(struct class *class, const struct device_node *np) +{ + return class_find_device(class, ((void *)0), np, device_match_of_node); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +class_find_device_by_fwnode(struct class *class, + const struct fwnode_handle *fwnode) +{ + return class_find_device(class, ((void *)0), fwnode, device_match_fwnode); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device *class_find_device_by_devt(struct class *class, + dev_t devt) +{ + return class_find_device(class, ((void *)0), &devt, device_match_devt); +} + + +struct acpi_device; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev) +{ + return class_find_device(class, ((void *)0), adev, device_match_acpi_dev); +} +# 191 "./include/linux/device/class.h" +struct class_attribute { + struct attribute attr; + ssize_t (*show)(struct class *class, struct class_attribute *attr, + char *buf); + ssize_t (*store)(struct class *class, struct class_attribute *attr, + const char *buf, size_t count); +}; +# 206 "./include/linux/device/class.h" +extern int __attribute__((__warn_unused_result__)) class_create_file_ns(struct class *class, + const struct class_attribute *attr, + const void *ns); +extern void class_remove_file_ns(struct class *class, + const struct class_attribute *attr, + const void *ns); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) class_create_file(struct class *class, + const struct class_attribute *attr) +{ + return class_create_file_ns(class, attr, ((void *)0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void class_remove_file(struct class *class, + const struct class_attribute *attr) +{ + return 
class_remove_file_ns(class, attr, ((void *)0)); +} + + +struct class_attribute_string { + struct class_attribute attr; + char *str; +}; +# 238 "./include/linux/device/class.h" +extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, + char *buf); + +struct class_interface { + struct list_head node; + struct class *class; + + int (*add_dev) (struct device *, struct class_interface *); + void (*remove_dev) (struct device *, struct class_interface *); +}; + +extern int __attribute__((__warn_unused_result__)) class_interface_register(struct class_interface *); +extern void class_interface_unregister(struct class_interface *); + +extern struct class * __attribute__((__warn_unused_result__)) __class_create(struct module *owner, + const char *name, + struct lock_class_key *key); +extern void class_destroy(struct class *cls); +# 31 "./include/linux/device.h" 2 +# 1 "./include/linux/device/driver.h" 1 +# 44 "./include/linux/device/driver.h" +enum probe_type { + PROBE_DEFAULT_STRATEGY, + PROBE_PREFER_ASYNCHRONOUS, + PROBE_FORCE_SYNCHRONOUS, +}; +# 95 "./include/linux/device/driver.h" +struct device_driver { + const char *name; + struct bus_type *bus; + + struct module *owner; + const char *mod_name; + + bool suppress_bind_attrs; + enum probe_type probe_type; + + const struct of_device_id *of_match_table; + const struct acpi_device_id *acpi_match_table; + + int (*probe) (struct device *dev); + void (*sync_state)(struct device *dev); + int (*remove) (struct device *dev); + void (*shutdown) (struct device *dev); + int (*suspend) (struct device *dev, pm_message_t state); + int (*resume) (struct device *dev); + const struct attribute_group **groups; + const struct attribute_group **dev_groups; + + const struct dev_pm_ops *pm; + void (*coredump) (struct device *dev); + + struct driver_private *p; +}; + + +extern int __attribute__((__warn_unused_result__)) driver_register(struct device_driver *drv); +extern void driver_unregister(struct device_driver *drv); + +extern struct device_driver *driver_find(const char *name, + struct bus_type *bus); +extern int driver_probe_done(void); +extern void wait_for_device_probe(void); + + + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *driver, char *buf); + ssize_t (*store)(struct device_driver *driver, const char *buf, + size_t count); +}; +# 148 "./include/linux/device/driver.h" +extern int __attribute__((__warn_unused_result__)) driver_create_file(struct device_driver *driver, + const struct driver_attribute *attr); +extern void driver_remove_file(struct device_driver *driver, + const struct driver_attribute *attr); + +extern int __attribute__((__warn_unused_result__)) driver_for_each_device(struct device_driver *drv, + struct device *start, + void *data, + int (*fn)(struct device *dev, + void *)); +struct device *driver_find_device(struct device_driver *drv, + struct device *start, const void *data, + int (*match)(struct device *dev, const void *data)); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device *driver_find_device_by_name(struct device_driver *drv, + const char *name) +{ + return driver_find_device(drv, ((void *)0), name, device_match_name); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +driver_find_device_by_of_node(struct device_driver *drv, + const struct device_node *np) +{ + return 
driver_find_device(drv, ((void *)0), np, device_match_of_node); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +driver_find_device_by_fwnode(struct device_driver *drv, + const struct fwnode_handle *fwnode) +{ + return driver_find_device(drv, ((void *)0), fwnode, device_match_fwnode); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device *driver_find_device_by_devt(struct device_driver *drv, + dev_t devt) +{ + return driver_find_device(drv, ((void *)0), &devt, device_match_devt); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device *driver_find_next_device(struct device_driver *drv, + struct device *start) +{ + return driver_find_device(drv, start, ((void *)0), device_match_any); +} +# 225 "./include/linux/device/driver.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device * +driver_find_device_by_acpi_dev(struct device_driver *drv, + const struct acpi_device *adev) +{ + return driver_find_device(drv, ((void *)0), adev, device_match_acpi_dev); +} +# 239 "./include/linux/device/driver.h" +extern int driver_deferred_probe_timeout; +void driver_deferred_probe_add(struct device *dev); +int driver_deferred_probe_check_state(struct device *dev); +void driver_init(void); +# 32 "./include/linux/device.h" 2 +# 1 "./arch/x86/include/asm/device.h" 1 + + + + +struct dev_archdata { + + void *iommu; + +}; + +struct pdev_archdata { +}; +# 33 "./include/linux/device.h" 2 + +struct device; +struct device_private; +struct device_driver; +struct driver_private; +struct module; +struct class; +struct subsys_private; +struct device_node; +struct fwnode_handle; +struct iommu_ops; +struct iommu_group; +struct dev_pin_info; +struct dev_iommu; +# 61 "./include/linux/device.h" +struct subsys_interface { + const char *name; + struct bus_type *subsys; + struct list_head node; + int (*add_dev)(struct device *dev, struct subsys_interface *sif); + void (*remove_dev)(struct device *dev, struct subsys_interface *sif); +}; + +int subsys_interface_register(struct subsys_interface *sif); +void subsys_interface_unregister(struct subsys_interface *sif); + +int subsys_system_register(struct bus_type *subsys, + const struct attribute_group **groups); +int subsys_virtual_register(struct bus_type *subsys, + const struct attribute_group **groups); +# 86 "./include/linux/device.h" +struct device_type { + const char *name; + const struct attribute_group **groups; + int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*devnode)(struct device *dev, umode_t *mode, + kuid_t *uid, kgid_t *gid); + void (*release)(struct device *dev); + + const struct dev_pm_ops *pm; +}; + + +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *dev, struct device_attribute *attr, + char *buf); + ssize_t (*store)(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +}; + +struct dev_ext_attribute { + struct device_attribute attr; + void *var; +}; + +ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t device_show_int(struct device *dev, struct device_attribute *attr, + char 
*buf); +ssize_t device_store_int(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, + char *buf); +ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +# 148 "./include/linux/device.h" +extern int device_create_file(struct device *device, + const struct device_attribute *entry); +extern void device_remove_file(struct device *dev, + const struct device_attribute *attr); +extern bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr); +extern int __attribute__((__warn_unused_result__)) device_create_bin_file(struct device *dev, + const struct bin_attribute *attr); +extern void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr); + + +typedef void (*dr_release_t)(struct device *dev, void *res); +typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); + + +extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, + int nid, const char *name) __attribute__((__malloc__)); +# 179 "./include/linux/device.h" +extern void devres_for_each_res(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data, + void (*fn)(struct device *, void *, void *), + void *data); +extern void devres_free(void *res); +extern void devres_add(struct device *dev, void *res); +extern void *devres_find(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern void *devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data); +extern void *devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern int devres_destroy(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +extern int devres_release(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); + + +extern void * __attribute__((__warn_unused_result__)) devres_open_group(struct device *dev, void *id, + gfp_t gfp); +extern void devres_close_group(struct device *dev, void *id); +extern void devres_remove_group(struct device *dev, void *id); +extern int devres_release_group(struct device *dev, void *id); + + +extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __attribute__((__malloc__)); +extern __attribute__((__format__(printf, 3, 0))) +char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap) __attribute__((__malloc__)); +extern __attribute__((__format__(printf, 3, 4))) +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) 
__attribute__((__malloc__)); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) +{ + return devm_kmalloc(dev, size, gfp | (( gfp_t)0x100u)); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *devm_kmalloc_array(struct device *dev, + size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (__builtin_expect(!!(({ typeof(n) __a = (n); typeof(size) __b = (size); typeof(&bytes) __d = (&bytes); (void) (&__a == &__b); (void) (&__a == __d); __builtin_mul_overflow(__a, __b, __d); })), 0)) + return ((void *)0); + + return devm_kmalloc(dev, bytes, flags); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *devm_kcalloc(struct device *dev, + size_t n, size_t size, gfp_t flags) +{ + return devm_kmalloc_array(dev, n, size, flags | (( gfp_t)0x100u)); +} +extern void devm_kfree(struct device *dev, const void *p); +extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __attribute__((__malloc__)); +extern const char *devm_kstrdup_const(struct device *dev, + const char *s, gfp_t gfp); +extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); + +extern unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order); +extern void devm_free_pages(struct device *dev, unsigned long addr); + +void *devm_ioremap_resource(struct device *dev, + const struct resource *res); +void *devm_ioremap_resource_wc(struct device *dev, + const struct resource *res); + +void *devm_of_iomap(struct device *dev, + struct device_node *node, int index, + resource_size_t *size); + + +int devm_add_action(struct device *dev, void (*action)(void *), void *data); +void devm_remove_action(struct device *dev, void (*action)(void *), void *data); +void devm_release_action(struct device *dev, void (*action)(void *), void *data); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int devm_add_action_or_reset(struct device *dev, + void (*action)(void *), void *data) +{ + int ret; + + ret = devm_add_action(dev, action, data); + if (ret) + action(data); + + return ret; +} +# 281 "./include/linux/device.h" +void *__devm_alloc_percpu(struct device *dev, size_t size, + size_t align); +void devm_free_percpu(struct device *dev, void *pdata); + +struct device_dma_parameters { + + + + + unsigned int max_segment_size; + unsigned long segment_boundary_mask; +}; +# 305 "./include/linux/device.h" +struct device_connection { + struct fwnode_handle *fwnode; + const char *endpoint[2]; + const char *id; + struct list_head list; +}; + +typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep, + void *data); + +void *fwnode_connection_find_match(struct fwnode_handle *fwnode, + const char *con_id, void *data, + devcon_match_fn_t match); +void *device_connection_find_match(struct device *dev, const char *con_id, + void *data, devcon_match_fn_t match); + +struct device *device_connection_find(struct device *dev, const char *con_id); + +void device_connection_add(struct device_connection *con); +void device_connection_remove(struct device_connection *con); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_connections_add(struct device_connection *cons) 
+{ + struct device_connection *c; + + for (c = cons; c->endpoint[0]; c++) + device_connection_add(c); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_connections_remove(struct device_connection *cons) +{ + struct device_connection *c; + + for (c = cons; c->endpoint[0]; c++) + device_connection_remove(c); +} +# 359 "./include/linux/device.h" +enum device_link_state { + DL_STATE_NONE = -1, + DL_STATE_DORMANT = 0, + DL_STATE_AVAILABLE, + DL_STATE_CONSUMER_PROBE, + DL_STATE_ACTIVE, + DL_STATE_SUPPLIER_UNBIND, +}; +# 402 "./include/linux/device.h" +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; + + struct callback_head callback_head; + + bool supplier_preactivated; +}; +# 424 "./include/linux/device.h" +enum dl_dev_state { + DL_DEV_NO_DRIVER = 0, + DL_DEV_PROBING, + DL_DEV_DRIVER_BOUND, + DL_DEV_UNBINDING, +}; +# 441 "./include/linux/device.h" +struct dev_links_info { + struct list_head suppliers; + struct list_head consumers; + struct list_head needs_suppliers; + struct list_head defer_sync; + bool need_for_probe; + enum dl_dev_state status; +}; +# 535 "./include/linux/device.h" +struct device { + struct kobject kobj; + struct device *parent; + + struct device_private *p; + + const char *init_name; + const struct device_type *type; + + struct bus_type *bus; + struct device_driver *driver; + + void *platform_data; + + void *driver_data; + + + struct mutex lockdep_mutex; + + struct mutex mutex; + + + + struct dev_links_info links; + struct dev_pm_info power; + struct dev_pm_domain *pm_domain; + + + struct irq_domain *msi_domain; + + + struct dev_pin_info *pins; + + + struct list_head msi_list; + + + const struct dma_map_ops *dma_ops; + u64 *dma_mask; + u64 coherent_dma_mask; + + + + + u64 bus_dma_limit; + unsigned long dma_pfn_offset; + + struct device_dma_parameters *dma_parms; + + struct list_head dma_pools; + + + struct dma_coherent_mem *dma_mem; + + + + struct cma *cma_area; + + + + struct dev_archdata archdata; + + struct device_node *of_node; + struct fwnode_handle *fwnode; + + + int numa_node; + + dev_t devt; + u32 id; + + spinlock_t devres_lock; + struct list_head devres_head; + + struct class *class; + const struct attribute_group **groups; + + void (*release)(struct device *dev); + struct iommu_group *iommu_group; + struct dev_iommu *iommu; + + bool offline_disabled:1; + bool offline:1; + bool of_node_reused:1; + bool state_synced:1; + + + + + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device *kobj_to_dev(struct kobject *kobj) +{ + return ({ void *__mptr = (void *)(kobj); do { extern void __compiletime_assert_1009(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(kobj)), typeof(((struct device *)0)->kobj)) && !__builtin_types_compatible_p(typeof(*(kobj)), typeof(void))))) __compiletime_assert_1009(); } while (0); ((struct device *)(__mptr - __builtin_offsetof(struct device, kobj))); }); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool device_iommu_mapped(struct device *dev) +{ + return (dev->iommu_group != ((void *)0)); +} + + +# 1 "./include/linux/pm_wakeup.h" 1 +# 18 
"./include/linux/pm_wakeup.h" +struct wake_irq; +# 43 "./include/linux/pm_wakeup.h" +struct wakeup_source { + const char *name; + int id; + struct list_head entry; + spinlock_t lock; + struct wake_irq *wakeirq; + struct timer_list timer; + unsigned long timer_expires; + ktime_t total_time; + ktime_t max_time; + ktime_t last_time; + ktime_t start_prevent_time; + ktime_t prevent_sleep_time; + unsigned long event_count; + unsigned long active_count; + unsigned long relax_count; + unsigned long expire_count; + unsigned long wakeup_count; + struct device *dev; + bool active:1; + bool autosleep_enabled:1; +}; +# 77 "./include/linux/pm_wakeup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool device_can_wakeup(struct device *dev) +{ + return dev->power.can_wakeup; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool device_may_wakeup(struct device *dev) +{ + return dev->power.can_wakeup && !!dev->power.wakeup; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_set_wakeup_path(struct device *dev) +{ + dev->power.wakeup_path = true; +} + + +extern struct wakeup_source *wakeup_source_create(const char *name); +extern void wakeup_source_destroy(struct wakeup_source *ws); +extern void wakeup_source_add(struct wakeup_source *ws); +extern void wakeup_source_remove(struct wakeup_source *ws); +extern struct wakeup_source *wakeup_source_register(struct device *dev, + const char *name); +extern void wakeup_source_unregister(struct wakeup_source *ws); +extern int wakeup_sources_read_lock(void); +extern void wakeup_sources_read_unlock(int idx); +extern struct wakeup_source *wakeup_sources_walk_start(void); +extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws); +extern int device_wakeup_enable(struct device *dev); +extern int device_wakeup_disable(struct device *dev); +extern void device_set_wakeup_capable(struct device *dev, bool capable); +extern int device_init_wakeup(struct device *dev, bool val); +extern int device_set_wakeup_enable(struct device *dev, bool enable); +extern void __pm_stay_awake(struct wakeup_source *ws); +extern void pm_stay_awake(struct device *dev); +extern void __pm_relax(struct wakeup_source *ws); +extern void pm_relax(struct device *dev); +extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard); +extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard); +# 195 "./include/linux/pm_wakeup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) +{ + return pm_wakeup_ws_event(ws, msec, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pm_wakeup_event(struct device *dev, unsigned int msec) +{ + return pm_wakeup_dev_event(dev, msec, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pm_wakeup_hard_event(struct device *dev) +{ + return pm_wakeup_dev_event(dev, 0, true); +} +# 644 "./include/linux/device.h" 2 + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *dev_name(const struct device *dev) +{ + + if (dev->init_name) + 
return dev->init_name; + + return kobject_name(&dev->kobj); +} + +extern __attribute__((__format__(printf, 2, 3))) +int dev_set_name(struct device *dev, const char *name, ...); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dev_to_node(struct device *dev) +{ + return dev->numa_node; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_dev_node(struct device *dev, int node) +{ + dev->numa_node = node; +} +# 676 "./include/linux/device.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct irq_domain *dev_get_msi_domain(const struct device *dev) +{ + + return dev->msi_domain; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_set_msi_domain(struct device *dev, struct irq_domain *d) +{ + + dev->msi_domain = d; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *dev_get_drvdata(const struct device *dev) +{ + return dev->driver_data; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_set_drvdata(struct device *dev, void *data) +{ + dev->driver_data = data; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct pm_subsys_data *dev_to_psd(struct device *dev) +{ + return dev ? dev->power.subsys_data : ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int dev_get_uevent_suppress(const struct device *dev) +{ + return dev->kobj.uevent_suppress; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_set_uevent_suppress(struct device *dev, int val) +{ + dev->kobj.uevent_suppress = val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int device_is_registered(struct device *dev) +{ + return dev->kobj.state_in_sysfs; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_enable_async_suspend(struct device *dev) +{ + if (!dev->power.is_prepared) + dev->power.async_suspend = true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_disable_async_suspend(struct device *dev) +{ + if (!dev->power.is_prepared) + dev->power.async_suspend = false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool device_async_suspend_enabled(struct device *dev) +{ + return !!dev->power.async_suspend; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool device_pm_not_required(struct device *dev) +{ + return dev->power.no_pm; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_set_pm_not_required(struct device *dev) +{ + dev->power.no_pm = true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_pm_syscore_device(struct 
device *dev, bool val) +{ + + dev->power.syscore = val; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_pm_set_driver_flags(struct device *dev, u32 flags) +{ + dev->power.driver_flags = flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dev_pm_test_driver_flags(struct device *dev, u32 flags) +{ + return !!(dev->power.driver_flags & flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_lock(struct device *dev) +{ + mutex_lock_nested(&dev->mutex, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int device_lock_interruptible(struct device *dev) +{ + return mutex_lock_interruptible_nested(&dev->mutex, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int device_trylock(struct device *dev) +{ + return mutex_trylock(&dev->mutex); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_unlock(struct device *dev) +{ + mutex_unlock(&dev->mutex); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_lock_assert(struct device *dev) +{ + do { ({ int __ret_warn_on = !!(debug_locks && !lock_is_held(&(&dev->mutex)->dep_map)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1010)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/device.h"), "i" (788), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1011)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1012)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct device_node *dev_of_node(struct device *dev) +{ + if (!1 || !dev) + return ((void *)0); + return dev->of_node; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dev_has_sync_state(struct device *dev) +{ + if (!dev) + return false; + if (dev->driver && dev->driver->sync_state) + return true; + if (dev->bus && dev->bus->sync_state) + return true; + return false; +} + + + + +extern int __attribute__((__warn_unused_result__)) device_register(struct device *dev); +extern void device_unregister(struct device *dev); +extern void device_initialize(struct device *dev); +extern int __attribute__((__warn_unused_result__)) device_add(struct device *dev); +extern void device_del(struct device *dev); +extern int device_for_each_child(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +extern int 
device_for_each_child_reverse(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +extern struct device *device_find_child(struct device *dev, void *data, + int (*match)(struct device *dev, void *data)); +extern struct device *device_find_child_by_name(struct device *parent, + const char *name); +extern int device_rename(struct device *dev, const char *new_name); +extern int device_move(struct device *dev, struct device *new_parent, + enum dpm_order dpm_order); +extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); +extern const char *device_get_devnode(struct device *dev, + umode_t *mode, kuid_t *uid, kgid_t *gid, + const char **tmp); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool device_supports_offline(struct device *dev) +{ + return dev->bus && dev->bus->offline && dev->bus->online; +} + +extern void lock_device_hotplug(void); +extern void unlock_device_hotplug(void); +extern int lock_device_hotplug_sysfs(void); +extern int device_offline(struct device *dev); +extern int device_online(struct device *dev); +extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dev_num_vf(struct device *dev) +{ + if (dev->bus && dev->bus->num_vf) + return dev->bus->num_vf(dev); + return 0; +} + + + + +extern struct device *__root_device_register(const char *name, + struct module *owner); + + + + + +extern void root_device_unregister(struct device *root); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *dev_get_platdata(const struct device *dev) +{ + return dev->platform_data; +} + + + + + +extern int __attribute__((__warn_unused_result__)) device_bind_driver(struct device *dev); +extern void device_release_driver(struct device *dev); +extern int __attribute__((__warn_unused_result__)) device_attach(struct device *dev); +extern int __attribute__((__warn_unused_result__)) driver_attach(struct device_driver *drv); +extern void device_initial_probe(struct device *dev); +extern int __attribute__((__warn_unused_result__)) device_reprobe(struct device *dev); + +extern bool device_is_bound(struct device *dev); + + + + +extern __attribute__((__format__(printf, 5, 6))) +struct device *device_create(struct class *cls, struct device *parent, + dev_t devt, void *drvdata, + const char *fmt, ...); +extern __attribute__((__format__(printf, 6, 7))) +struct device *device_create_with_groups(struct class *cls, + struct device *parent, dev_t devt, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); +extern void device_destroy(struct class *cls, dev_t devt); + +extern int __attribute__((__warn_unused_result__)) device_add_groups(struct device *dev, + const struct attribute_group **groups); +extern void device_remove_groups(struct device *dev, + const struct attribute_group **groups); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) device_add_group(struct device *dev, + const struct attribute_group *grp) +{ + const struct attribute_group *groups[] = { grp, ((void *)0) }; + + return 
device_add_groups(dev, groups); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void device_remove_group(struct device *dev, + const struct attribute_group *grp) +{ + const struct attribute_group *groups[] = { grp, ((void *)0) }; + + return device_remove_groups(dev, groups); +} + +extern int __attribute__((__warn_unused_result__)) devm_device_add_groups(struct device *dev, + const struct attribute_group **groups); +extern void devm_device_remove_groups(struct device *dev, + const struct attribute_group **groups); +extern int __attribute__((__warn_unused_result__)) devm_device_add_group(struct device *dev, + const struct attribute_group *grp); +extern void devm_device_remove_group(struct device *dev, + const struct attribute_group *grp); + + + + + + + +extern int (*platform_notify)(struct device *dev); + +extern int (*platform_notify_remove)(struct device *dev); + + + + + + +extern struct device *get_device(struct device *dev); +extern void put_device(struct device *dev); +extern bool kill_device(struct device *dev); + + +extern int devtmpfs_mount(void); + + + + + +extern void device_shutdown(void); + + +extern const char *dev_driver_string(const struct device *dev); + + +struct device_link *device_link_add(struct device *consumer, + struct device *supplier, u32 flags); +void device_link_del(struct device_link *link); +void device_link_remove(void *consumer, struct device *supplier); +void device_links_supplier_sync_state_pause(void); +void device_links_supplier_sync_state_resume(void); +# 974 "./include/linux/device.h" +extern long sysfs_deprecated; +# 19 "./include/linux/node.h" 2 +# 31 "./include/linux/node.h" +struct node_hmem_attrs { + unsigned int read_bandwidth; + unsigned int write_bandwidth; + unsigned int read_latency; + unsigned int write_latency; +}; + +enum cache_indexing { + NODE_CACHE_DIRECT_MAP, + NODE_CACHE_INDEXED, + NODE_CACHE_OTHER, +}; + +enum cache_write_policy { + NODE_CACHE_WRITE_BACK, + NODE_CACHE_WRITE_THROUGH, + NODE_CACHE_WRITE_OTHER, +}; +# 59 "./include/linux/node.h" +struct node_cache_attrs { + enum cache_indexing indexing; + enum cache_write_policy write_policy; + u64 size; + u16 line_size; + u8 level; +}; + + +void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs); +void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, + unsigned access); +# 84 "./include/linux/node.h" +struct node { + struct device dev; + struct list_head access_list; + + + struct work_struct node_work; + + + struct list_head cache_attrs; + struct device *cache_dev; + +}; + +struct memory_block; +extern struct node *node_devices[]; +typedef void (*node_registration_func_t)(struct node *); + + +extern int link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn); +# 112 "./include/linux/node.h" +extern void unregister_node(struct node *node); + + +extern int __register_one_node(int nid); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_one_node(int nid) +{ + int error = 0; + + if (node_state((nid), N_ONLINE)) { + struct pglist_data *pgdat = (node_data[nid]); + unsigned long start_pfn = pgdat->node_start_pfn; + unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; + + error = __register_one_node(nid); + if (error) + return error; + + error = link_mem_sections(nid, start_pfn, end_pfn); + } + + return error; +} + +extern void unregister_one_node(int nid); +extern int 
register_cpu_under_node(unsigned int cpu, unsigned int nid); +extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); +extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk); + +extern int register_memory_node_under_compute_node(unsigned int mem_nid, + unsigned int cpu_nid, + unsigned access); + + +extern void register_hugetlbfs_with_node(node_registration_func_t doregister, + node_registration_func_t unregister); +# 18 "./include/linux/cpu.h" 2 + + +# 1 "./include/linux/cpuhotplug.h" 1 +# 25 "./include/linux/cpuhotplug.h" +enum cpuhp_state { + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, + CPUHP_CREATE_THREADS, + CPUHP_PERF_PREPARE, + CPUHP_PERF_X86_PREPARE, + CPUHP_PERF_X86_AMD_UNCORE_PREP, + CPUHP_PERF_POWER, + CPUHP_PERF_SUPERH, + CPUHP_X86_HPET_DEAD, + CPUHP_X86_APB_DEAD, + CPUHP_X86_MCE_DEAD, + CPUHP_VIRT_NET_DEAD, + CPUHP_SLUB_DEAD, + CPUHP_MM_WRITEBACK_DEAD, + CPUHP_MM_VMSTAT_DEAD, + CPUHP_SOFTIRQ_DEAD, + CPUHP_NET_MVNETA_DEAD, + CPUHP_CPUIDLE_DEAD, + CPUHP_ARM64_FPSIMD_DEAD, + CPUHP_ARM_OMAP_WAKE_DEAD, + CPUHP_IRQ_POLL_DEAD, + CPUHP_BLOCK_SOFTIRQ_DEAD, + CPUHP_ACPI_CPUDRV_DEAD, + CPUHP_S390_PFAULT_DEAD, + CPUHP_BLK_MQ_DEAD, + CPUHP_FS_BUFF_DEAD, + CPUHP_PRINTK_DEAD, + CPUHP_MM_MEMCQ_DEAD, + CPUHP_PERCPU_CNT_DEAD, + CPUHP_RADIX_DEAD, + CPUHP_PAGE_ALLOC_DEAD, + CPUHP_NET_DEV_DEAD, + CPUHP_PCI_XGENE_DEAD, + CPUHP_IOMMU_INTEL_DEAD, + CPUHP_LUSTRE_CFS_DEAD, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, + CPUHP_PADATA_DEAD, + CPUHP_WORKQUEUE_PREP, + CPUHP_POWER_NUMA_PREPARE, + CPUHP_HRTIMERS_PREPARE, + CPUHP_PROFILE_PREPARE, + CPUHP_X2APIC_PREPARE, + CPUHP_SMPCFD_PREPARE, + CPUHP_RELAY_PREPARE, + CPUHP_SLAB_PREPARE, + CPUHP_MD_RAID5_PREPARE, + CPUHP_RCUTREE_PREP, + CPUHP_CPUIDLE_COUPLED_PREPARE, + CPUHP_POWERPC_PMAC_PREPARE, + CPUHP_POWERPC_MMU_CTX_PREPARE, + CPUHP_XEN_PREPARE, + CPUHP_XEN_EVTCHN_PREPARE, + CPUHP_ARM_SHMOBILE_SCU_PREPARE, + CPUHP_SH_SH3X_PREPARE, + CPUHP_NET_FLOW_PREPARE, + CPUHP_TOPOLOGY_PREPARE, + CPUHP_NET_IUCV_PREPARE, + CPUHP_ARM_BL_PREPARE, + CPUHP_TRACE_RB_PREPARE, + CPUHP_MM_ZS_PREPARE, + CPUHP_MM_ZSWP_MEM_PREPARE, + CPUHP_MM_ZSWP_POOL_PREPARE, + CPUHP_KVM_PPC_BOOK3S_PREPARE, + CPUHP_ZCOMP_PREPARE, + CPUHP_TIMERS_PREPARE, + CPUHP_MIPS_SOC_PREPARE, + CPUHP_BP_PREPARE_DYN, + CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, + CPUHP_BRINGUP_CPU, + CPUHP_AP_IDLE_DEAD, + CPUHP_AP_OFFLINE, + CPUHP_AP_SCHED_STARTING, + CPUHP_AP_RCUTREE_DYING, + CPUHP_AP_CPU_PM_STARTING, + CPUHP_AP_IRQ_GIC_STARTING, + CPUHP_AP_IRQ_HIP04_STARTING, + CPUHP_AP_IRQ_ARMADA_XP_STARTING, + CPUHP_AP_IRQ_BCM2836_STARTING, + CPUHP_AP_IRQ_MIPS_GIC_STARTING, + CPUHP_AP_IRQ_RISCV_STARTING, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, + CPUHP_AP_ARM_MVEBU_COHERENCY, + CPUHP_AP_MICROCODE_LOADER, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, + CPUHP_AP_PERF_X86_STARTING, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING, + CPUHP_AP_PERF_X86_CQM_STARTING, + CPUHP_AP_PERF_X86_CSTATE_STARTING, + CPUHP_AP_PERF_XTENSA_STARTING, + CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, + CPUHP_AP_ARM_SDEI_STARTING, + CPUHP_AP_ARM_VFP_STARTING, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, + CPUHP_AP_PERF_ARM_ACPI_STARTING, + CPUHP_AP_PERF_ARM_STARTING, + CPUHP_AP_ARM_L2X0_STARTING, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, + CPUHP_AP_ARM_ARCH_TIMER_STARTING, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, + CPUHP_AP_JCORE_TIMER_STARTING, + CPUHP_AP_ARM_TWD_STARTING, + CPUHP_AP_QCOM_TIMER_STARTING, + CPUHP_AP_TEGRA_TIMER_STARTING, + CPUHP_AP_ARMADA_TIMER_STARTING, + CPUHP_AP_MARCO_TIMER_STARTING, 
+ CPUHP_AP_MIPS_GIC_TIMER_STARTING, + CPUHP_AP_ARC_TIMER_STARTING, + CPUHP_AP_RISCV_TIMER_STARTING, + CPUHP_AP_CSKY_TIMER_STARTING, + CPUHP_AP_HYPERV_TIMER_STARTING, + CPUHP_AP_KVM_STARTING, + CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, + CPUHP_AP_KVM_ARM_VGIC_STARTING, + CPUHP_AP_KVM_ARM_TIMER_STARTING, + + CPUHP_AP_DUMMY_TIMER_STARTING, + CPUHP_AP_ARM_XEN_STARTING, + CPUHP_AP_ARM_KVMPV_STARTING, + CPUHP_AP_ARM_CORESIGHT_STARTING, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, + CPUHP_AP_ARM64_ISNDEP_STARTING, + CPUHP_AP_SMPCFD_DYING, + CPUHP_AP_X86_TBOOT_DYING, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING, + CPUHP_AP_ONLINE, + CPUHP_TEARDOWN_CPU, + CPUHP_AP_ONLINE_IDLE, + CPUHP_AP_SMPBOOT_THREADS, + CPUHP_AP_X86_VDSO_VMA_ONLINE, + CPUHP_AP_IRQ_AFFINITY_ONLINE, + CPUHP_AP_BLK_MQ_ONLINE, + CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, + CPUHP_AP_X86_INTEL_EPB_ONLINE, + CPUHP_AP_PERF_ONLINE, + CPUHP_AP_PERF_X86_ONLINE, + CPUHP_AP_PERF_X86_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, + CPUHP_AP_PERF_X86_RAPL_ONLINE, + CPUHP_AP_PERF_X86_CQM_ONLINE, + CPUHP_AP_PERF_X86_CSTATE_ONLINE, + CPUHP_AP_PERF_S390_CF_ONLINE, + CPUHP_AP_PERF_S390_SF_ONLINE, + CPUHP_AP_PERF_ARM_CCI_ONLINE, + CPUHP_AP_PERF_ARM_CCN_ONLINE, + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + CPUHP_AP_PERF_ARM_L2X0_ONLINE, + CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, + CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, + CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE, + CPUHP_AP_WATCHDOG_ONLINE, + CPUHP_AP_WORKQUEUE_ONLINE, + CPUHP_AP_RCUTREE_ONLINE, + CPUHP_AP_BASE_CACHEINFO_ONLINE, + CPUHP_AP_ONLINE_DYN, + CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, + CPUHP_AP_X86_HPET_ONLINE, + CPUHP_AP_X86_KVM_CLK_ONLINE, + CPUHP_AP_ACTIVE, + CPUHP_ONLINE, +}; + +int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu), bool multi_instance); + +int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, + bool invoke, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu), + bool multi_instance); +# 215 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpuhp_setup_state(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state(state, name, true, startup, teardown, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpuhp_setup_state_cpuslocked(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state_cpuslocked(state, name, true, startup, + teardown, false); +} +# 243 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpuhp_setup_state_nocalls(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state(state, name, false, startup, teardown, + false); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu), + int (*teardown)(unsigned int cpu)) +{ + return __cpuhp_setup_state_cpuslocked(state, name, false, startup, + teardown, false); +} +# 273 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpuhp_setup_state_multi(enum cpuhp_state state, + const char *name, + int (*startup)(unsigned int cpu, + struct hlist_node *node), + int (*teardown)(unsigned int cpu, + struct hlist_node *node)) +{ + return __cpuhp_setup_state(state, name, false, + (void *) startup, + (void *) teardown, true); +} + +int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, + bool invoke); +int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, + struct hlist_node *node, bool invoke); +# 300 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpuhp_state_add_instance(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_add_instance(state, node, true); +} +# 315 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpuhp_state_add_instance_nocalls(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_add_instance(state, node, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_add_instance_cpuslocked(state, node, false); +} + +void __cpuhp_remove_state(enum cpuhp_state state, bool invoke); +void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke); +# 338 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpuhp_remove_state(enum cpuhp_state state) +{ + __cpuhp_remove_state(state, true); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpuhp_remove_state_nocalls(enum cpuhp_state state) +{ + __cpuhp_remove_state(state, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state) +{ + __cpuhp_remove_state_cpuslocked(state, false); +} +# 366 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpuhp_remove_multi_state(enum cpuhp_state state) +{ + __cpuhp_remove_state(state, false); +} + +int __cpuhp_state_remove_instance(enum cpuhp_state state, + struct hlist_node *node, bool invoke); +# 383 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int cpuhp_state_remove_instance(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_remove_instance(state, node, true); +} +# 397 "./include/linux/cpuhotplug.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int 
cpuhp_state_remove_instance_nocalls(enum cpuhp_state state, + struct hlist_node *node) +{ + return __cpuhp_state_remove_instance(state, node, false); +} + + +void cpuhp_online_idle(enum cpuhp_state state); +# 21 "./include/linux/cpu.h" 2 + +struct device; +struct device_node; +struct attribute_group; + +struct cpu { + int node_id; + int hotpluggable; + struct device dev; +}; + +extern void boot_cpu_init(void); +extern void boot_cpu_hotplug_init(void); +extern void cpu_init(void); +extern void trap_init(void); + +extern int register_cpu(struct cpu *cpu, int num); +extern struct device *get_cpu_device(unsigned cpu); +extern bool cpu_is_hotpluggable(unsigned cpu); +extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id); +extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun, + int cpu, unsigned int *thread); + +extern int cpu_add_dev_attr(struct device_attribute *attr); +extern void cpu_remove_dev_attr(struct device_attribute *attr); + +extern int cpu_add_dev_attr_group(struct attribute_group *attrs); +extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); + +extern ssize_t cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_l1tf(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mds(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_tsx_async_abort(struct device *dev, + struct device_attribute *attr, + char *buf); +extern ssize_t cpu_show_itlb_multihit(struct device *dev, + struct device_attribute *attr, char *buf); + +extern __attribute__((__format__(printf, 4, 5))) +struct device *cpu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); + +extern void unregister_cpu(struct cpu *cpu); +extern ssize_t arch_cpu_probe(const char *, size_t); +extern ssize_t arch_cpu_release(const char *, size_t); +# 90 "./include/linux/cpu.h" +extern bool cpuhp_tasks_frozen; +int add_cpu(unsigned int cpu); +int cpu_device_up(struct device *dev); +void notify_cpu_starting(unsigned int cpu); +extern void cpu_maps_update_begin(void); +extern void cpu_maps_update_done(void); +int bringup_hibernate_cpu(unsigned int sleep_cpu); +void bringup_nonboot_cpus(unsigned int setup_max_cpus); +# 111 "./include/linux/cpu.h" +extern struct bus_type cpu_subsys; + + +extern void cpus_write_lock(void); +extern void cpus_write_unlock(void); +extern void cpus_read_lock(void); +extern void cpus_read_unlock(void); +extern int cpus_read_trylock(void); +extern void lockdep_assert_cpus_held(void); +extern void cpu_hotplug_disable(void); +extern void cpu_hotplug_enable(void); +void clear_tasks_mm_cpumask(int cpu); +int remove_cpu(unsigned int cpu); +int cpu_device_down(struct device *dev); +extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu); +# 141 "./include/linux/cpu.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_hotplug_begin(void) { cpus_write_lock(); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cpu_hotplug_done(void) { 
cpus_write_unlock(); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void get_online_cpus(void) { cpus_read_lock(); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_online_cpus(void) { cpus_read_unlock(); } + + +extern int freeze_secondary_cpus(int primary); +extern void thaw_secondary_cpus(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int suspend_disable_secondary_cpus(void) +{ + int cpu = 0; + + if (0) + cpu = -1; + + return freeze_secondary_cpus(cpu); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void suspend_enable_secondary_cpus(void) +{ + return thaw_secondary_cpus(); +} + + + + + + + +void cpu_startup_entry(enum cpuhp_state state); + +void cpu_idle_poll_ctrl(bool enable); + + + + +bool cpu_in_idle(unsigned long pc); + +void arch_cpu_idle(void); +void arch_cpu_idle_prepare(void); +void arch_cpu_idle_enter(void); +void arch_cpu_idle_exit(void); +void arch_cpu_idle_dead(void); + +int cpu_report_state(int cpu); +int cpu_check_up_prepare(int cpu); +void cpu_set_state_online(int cpu); +void play_idle_precise(u64 duration_ns, u64 latency_ns); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void play_idle(unsigned long duration_us) +{ + play_idle_precise(duration_us * 1000L, ((u64)~0ULL)); +} + + +bool cpu_wait_death(unsigned int cpu, int seconds); +bool cpu_report_death(void); +void cpuhp_report_idle_dead(void); + + + + +enum cpuhp_smt_control { + CPU_SMT_ENABLED, + CPU_SMT_DISABLED, + CPU_SMT_FORCE_DISABLED, + CPU_SMT_NOT_SUPPORTED, + CPU_SMT_NOT_IMPLEMENTED, +}; + + +extern enum cpuhp_smt_control cpu_smt_control; +extern void cpu_smt_disable(bool force); +extern void cpu_smt_check_topology(void); +extern bool cpu_smt_possible(void); +extern int cpuhp_smt_enable(void); +extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval); +# 227 "./include/linux/cpu.h" +extern bool cpu_mitigations_off(void); +extern bool cpu_mitigations_auto_nosmt(void); +# 51 "./include/linux/perf_event.h" 2 +# 1 "./include/linux/irq_work.h" 1 +# 31 "./include/linux/irq_work.h" +struct irq_work { + struct llist_node llnode; + atomic_t flags; + void (*func)(struct irq_work *); +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) +{ + atomic_set(&work->flags, 0); + work->func = func; +} + + + + + + + +bool irq_work_queue(struct irq_work *work); +bool irq_work_queue_on(struct irq_work *work, int cpu); + +void irq_work_tick(void); +void irq_work_sync(struct irq_work *work); + + +# 1 "./arch/x86/include/asm/irq_work.h" 1 + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_irq_work_has_interrupt(void) +{ + return (__builtin_constant_p(( 0*32+ 9)) && ( (((( 0*32+ 9))>>5)==(0) && (1UL<<((( 0*32+ 9))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 0*32+ 9))>>5)==(1) && (1UL<<((( 0*32+ 9))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 0*32+ 9))>>5)==(2) && (1UL<<((( 0*32+ 9))&31) & 
0 )) || (((( 0*32+ 9))>>5)==(3) && (1UL<<((( 0*32+ 9))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 0*32+ 9))>>5)==(4) && (1UL<<((( 0*32+ 9))&31) & (0) )) || (((( 0*32+ 9))>>5)==(5) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(6) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(7) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(8) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(9) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(10) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(11) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(12) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(13) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(14) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(15) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(16) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(17) && (1UL<<((( 0*32+ 9))&31) & 0 )) || (((( 0*32+ 9))>>5)==(18) && (1UL<<((( 0*32+ 9))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 0*32+ 9), (unsigned long *)((&boot_cpu_data)->x86_capability))); +} +extern void arch_irq_work_raise(void); +# 58 "./include/linux/irq_work.h" 2 + +void irq_work_run(void); +bool irq_work_needs_cpu(void); +void irq_work_single(void *arg); +# 52 "./include/linux/perf_event.h" 2 +# 1 "./include/linux/static_key.h" 1 +# 53 "./include/linux/perf_event.h" 2 +# 1 "./include/linux/jump_label_ratelimit.h" 1 +# 9 "./include/linux/jump_label_ratelimit.h" +struct static_key_deferred { + struct static_key key; + unsigned long timeout; + struct delayed_work work; +}; + +struct static_key_true_deferred { + struct static_key_true key; + unsigned long timeout; + struct delayed_work work; +}; + +struct static_key_false_deferred { + struct static_key_false key; + unsigned long timeout; + struct delayed_work work; +}; +# 35 "./include/linux/jump_label_ratelimit.h" +extern void +__static_key_slow_dec_deferred(struct static_key *key, + struct delayed_work *work, + unsigned long timeout); +extern void __static_key_deferred_flush(void *key, struct delayed_work *work); +extern void +jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); + +extern void jump_label_update_timeout(struct work_struct *work); +# 54 "./include/linux/perf_event.h" 2 + + +# 1 "./include/linux/perf_regs.h" 1 + + + + + + +struct perf_regs { + __u64 abi; + struct pt_regs *regs; +}; + + +# 1 "./arch/x86/include/uapi/asm/perf_regs.h" 1 + + + + +enum perf_event_x86_regs { + PERF_REG_X86_AX, + PERF_REG_X86_BX, + PERF_REG_X86_CX, + PERF_REG_X86_DX, + PERF_REG_X86_SI, + PERF_REG_X86_DI, + PERF_REG_X86_BP, + PERF_REG_X86_SP, + PERF_REG_X86_IP, + PERF_REG_X86_FLAGS, + PERF_REG_X86_CS, + PERF_REG_X86_SS, + PERF_REG_X86_DS, + PERF_REG_X86_ES, + PERF_REG_X86_FS, + PERF_REG_X86_GS, + PERF_REG_X86_R8, + PERF_REG_X86_R9, + PERF_REG_X86_R10, + PERF_REG_X86_R11, + PERF_REG_X86_R12, + PERF_REG_X86_R13, + PERF_REG_X86_R14, + PERF_REG_X86_R15, + + PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1, + PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1, + + + PERF_REG_X86_XMM0 = 32, + PERF_REG_X86_XMM1 = 34, + PERF_REG_X86_XMM2 = 36, + PERF_REG_X86_XMM3 = 38, + PERF_REG_X86_XMM4 = 40, + PERF_REG_X86_XMM5 = 42, + PERF_REG_X86_XMM6 = 44, + PERF_REG_X86_XMM7 = 46, + PERF_REG_X86_XMM8 = 48, + PERF_REG_X86_XMM9 = 50, + PERF_REG_X86_XMM10 = 52, + PERF_REG_X86_XMM11 = 54, + PERF_REG_X86_XMM12 = 56, + PERF_REG_X86_XMM13 = 58, + PERF_REG_X86_XMM14 = 60, 
+ PERF_REG_X86_XMM15 = 62, + + + PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2, +}; +# 14 "./include/linux/perf_regs.h" 2 + + + + + +u64 perf_reg_value(struct pt_regs *regs, int idx); +int perf_reg_validate(u64 mask); +u64 perf_reg_abi(struct task_struct *task); +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy); +# 57 "./include/linux/perf_event.h" 2 +# 1 "./include/linux/cgroup.h" 1 +# 16 "./include/linux/cgroup.h" +# 1 "./include/uapi/linux/cgroupstats.h" 1 +# 20 "./include/uapi/linux/cgroupstats.h" +# 1 "./include/uapi/linux/taskstats.h" 1 +# 41 "./include/uapi/linux/taskstats.h" +struct taskstats { + + + + + + __u16 version; + __u32 ac_exitcode; + + + + + __u8 ac_flag; + __u8 ac_nice; +# 72 "./include/uapi/linux/taskstats.h" + __u64 cpu_count __attribute__((aligned(8))); + __u64 cpu_delay_total; + + + + + + + __u64 blkio_count; + __u64 blkio_delay_total; + + + __u64 swapin_count; + __u64 swapin_delay_total; + + + + + + + + __u64 cpu_run_real_total; + + + + + + + + __u64 cpu_run_virtual_total; + + + + + char ac_comm[32]; + __u8 ac_sched __attribute__((aligned(8))); + + __u8 ac_pad[3]; + __u32 ac_uid __attribute__((aligned(8))); + + __u32 ac_gid; + __u32 ac_pid; + __u32 ac_ppid; + + __u32 ac_btime; + __u64 ac_etime __attribute__((aligned(8))); + + __u64 ac_utime; + __u64 ac_stime; + __u64 ac_minflt; + __u64 ac_majflt; +# 132 "./include/uapi/linux/taskstats.h" + __u64 coremem; + + + + __u64 virtmem; + + + + + __u64 hiwater_rss; + __u64 hiwater_vm; + + + __u64 read_char; + __u64 write_char; + __u64 read_syscalls; + __u64 write_syscalls; + + + + + __u64 read_bytes; + __u64 write_bytes; + __u64 cancelled_write_bytes; + + __u64 nvcsw; + __u64 nivcsw; + + + __u64 ac_utimescaled; + __u64 ac_stimescaled; + __u64 cpu_scaled_run_real_total; + + + __u64 freepages_count; + __u64 freepages_delay_total; + + + __u64 thrashing_count; + __u64 thrashing_delay_total; + + + __u64 ac_btime64; +}; +# 184 "./include/uapi/linux/taskstats.h" +enum { + TASKSTATS_CMD_UNSPEC = 0, + TASKSTATS_CMD_GET, + TASKSTATS_CMD_NEW, + __TASKSTATS_CMD_MAX, +}; + + + +enum { + TASKSTATS_TYPE_UNSPEC = 0, + TASKSTATS_TYPE_PID, + TASKSTATS_TYPE_TGID, + TASKSTATS_TYPE_STATS, + TASKSTATS_TYPE_AGGR_PID, + TASKSTATS_TYPE_AGGR_TGID, + TASKSTATS_TYPE_NULL, + __TASKSTATS_TYPE_MAX, +}; + + + +enum { + TASKSTATS_CMD_ATTR_UNSPEC = 0, + TASKSTATS_CMD_ATTR_PID, + TASKSTATS_CMD_ATTR_TGID, + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK, + __TASKSTATS_CMD_ATTR_MAX, +}; +# 21 "./include/uapi/linux/cgroupstats.h" 2 +# 32 "./include/uapi/linux/cgroupstats.h" +struct cgroupstats { + __u64 nr_sleeping; + __u64 nr_running; + __u64 nr_stopped; + __u64 nr_uninterruptible; + + __u64 nr_io_wait; +}; + + + + + + + +enum { + CGROUPSTATS_CMD_UNSPEC = __TASKSTATS_CMD_MAX, + CGROUPSTATS_CMD_GET, + CGROUPSTATS_CMD_NEW, + __CGROUPSTATS_CMD_MAX, +}; + + + +enum { + CGROUPSTATS_TYPE_UNSPEC = 0, + CGROUPSTATS_TYPE_CGROUP_STATS, + __CGROUPSTATS_TYPE_MAX, +}; + + + +enum { + CGROUPSTATS_CMD_ATTR_UNSPEC = 0, + CGROUPSTATS_CMD_ATTR_FD, + __CGROUPSTATS_CMD_ATTR_MAX, +}; +# 17 "./include/linux/cgroup.h" 2 + + + + + + + +# 1 "./include/linux/user_namespace.h" 1 +# 17 "./include/linux/user_namespace.h" +struct uid_gid_extent { + u32 first; + u32 lower_first; + u32 count; +}; + +struct uid_gid_map { + u32 nr_extents; + union { + struct uid_gid_extent extent[5]; + struct { + struct uid_gid_extent *forward; + struct uid_gid_extent *reverse; + }; + }; +}; + + + + + +struct ucounts; + 
+enum ucount_type { + UCOUNT_USER_NAMESPACES, + UCOUNT_PID_NAMESPACES, + UCOUNT_UTS_NAMESPACES, + UCOUNT_IPC_NAMESPACES, + UCOUNT_NET_NAMESPACES, + UCOUNT_MNT_NAMESPACES, + UCOUNT_CGROUP_NAMESPACES, + UCOUNT_TIME_NAMESPACES, + + UCOUNT_INOTIFY_INSTANCES, + UCOUNT_INOTIFY_WATCHES, + + UCOUNT_COUNTS, +}; + +struct user_namespace { + struct uid_gid_map uid_map; + struct uid_gid_map gid_map; + struct uid_gid_map projid_map; + atomic_t count; + struct user_namespace *parent; + int level; + kuid_t owner; + kgid_t group; + struct ns_common ns; + unsigned long flags; + + + + + + + + struct list_head keyring_name_list; + struct key *user_keyring_register; + struct rw_semaphore keyring_sem; + + + + + struct key *persistent_keyring_register; + + struct work_struct work; + + struct ctl_table_set set; + struct ctl_table_header *sysctls; + + struct ucounts *ucounts; + int ucount_max[UCOUNT_COUNTS]; +} __attribute__((__designated_init__)); + +struct ucounts { + struct hlist_node node; + struct user_namespace *ns; + kuid_t uid; + int count; + atomic_t ucount[UCOUNT_COUNTS]; +}; + +extern struct user_namespace init_user_ns; + +bool setup_userns_sysctls(struct user_namespace *ns); +void retire_userns_sysctls(struct user_namespace *ns); +struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type); +void dec_ucount(struct ucounts *ucounts, enum ucount_type type); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct user_namespace *get_user_ns(struct user_namespace *ns) +{ + if (ns) + atomic_inc(&ns->count); + return ns; +} + +extern int create_user_ns(struct cred *new); +extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred); +extern void __put_user_ns(struct user_namespace *ns); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_user_ns(struct user_namespace *ns) +{ + if (ns && atomic_dec_and_test(&ns->count)) + __put_user_ns(ns); +} + +struct seq_operations; +extern const struct seq_operations proc_uid_seq_operations; +extern const struct seq_operations proc_gid_seq_operations; +extern const struct seq_operations proc_projid_seq_operations; +extern ssize_t proc_uid_map_write(struct file *, const char *, size_t, loff_t *); +extern ssize_t proc_gid_map_write(struct file *, const char *, size_t, loff_t *); +extern ssize_t proc_projid_map_write(struct file *, const char *, size_t, loff_t *); +extern ssize_t proc_setgroups_write(struct file *, const char *, size_t, loff_t *); +extern int proc_setgroups_show(struct seq_file *m, void *v); +extern bool userns_may_setgroups(const struct user_namespace *ns); +extern bool in_userns(const struct user_namespace *ancestor, + const struct user_namespace *child); +extern bool current_in_userns(const struct user_namespace *target_ns); +struct ns_common *ns_get_owner(struct ns_common *ns); +# 25 "./include/linux/cgroup.h" 2 + +# 1 "./include/linux/kernel_stat.h" 1 +# 9 "./include/linux/kernel_stat.h" +# 1 "./include/linux/interrupt.h" 1 +# 9 "./include/linux/interrupt.h" +# 1 "./include/linux/irqreturn.h" 1 +# 11 "./include/linux/irqreturn.h" +enum irqreturn { + IRQ_NONE = (0 << 0), + IRQ_HANDLED = (1 << 0), + IRQ_WAKE_THREAD = (1 << 1), +}; + +typedef enum irqreturn irqreturn_t; +# 10 "./include/linux/interrupt.h" 2 +# 1 "./include/linux/irqnr.h" 1 + + + + +# 1 "./include/uapi/linux/irqnr.h" 1 +# 6 "./include/linux/irqnr.h" 2 + + +extern int nr_irqs; +extern struct irq_desc 
*irq_to_desc(unsigned int irq); +unsigned int irq_get_next_irq(unsigned int offset); +# 11 "./include/linux/interrupt.h" 2 +# 19 "./include/linux/interrupt.h" +# 1 "./arch/x86/include/asm/irq.h" 1 +# 21 "./arch/x86/include/asm/irq.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int irq_canonicalize(int irq) +{ + return ((irq == 2) ? 9 : irq); +} + +extern int irq_init_percpu_irqstack(unsigned int cpu); + + + +struct irq_desc; + +extern void fixup_irqs(void); + + +extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)); + + +extern void (*x86_platform_ipi_callback)(void); +extern void native_init_IRQ(void); + +extern void __handle_irq(struct irq_desc *desc, struct pt_regs *regs); + +extern __attribute__((__externally_visible__)) void do_IRQ(struct pt_regs *regs, unsigned long vector); + +extern void init_ISA_irqs(void); + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) init_IRQ(void); + + +void arch_trigger_cpumask_backtrace(const struct cpumask *mask, + bool exclude_self); +# 20 "./include/linux/interrupt.h" 2 +# 87 "./include/linux/interrupt.h" +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED, +}; + +typedef irqreturn_t (*irq_handler_t)(int, void *); +# 110 "./include/linux/interrupt.h" +struct irqaction { + irq_handler_t handler; + void *dev_id; + void *percpu_dev_id; + struct irqaction *next; + irq_handler_t thread_fn; + struct task_struct *thread; + struct irqaction *secondary; + unsigned int irq; + unsigned int flags; + unsigned long thread_flags; + unsigned long thread_mask; + const char *name; + struct proc_dir_entry *dir; +} __attribute__((__aligned__(1 << (12)))); + +extern irqreturn_t no_action(int cpl, void *dev_id); +# 138 "./include/linux/interrupt.h" +extern int __attribute__((__warn_unused_result__)) +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev); +# 156 "./include/linux/interrupt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev) +{ + return request_threaded_irq(irq, handler, ((void *)0), flags, name, dev); +} + +extern int __attribute__((__warn_unused_result__)) +request_any_context_irq(unsigned int irq, irq_handler_t handler, + unsigned long flags, const char *name, void *dev_id); + +extern int __attribute__((__warn_unused_result__)) +__request_percpu_irq(unsigned int irq, irq_handler_t handler, + unsigned long flags, const char *devname, + void *percpu_dev_id); + +extern int __attribute__((__warn_unused_result__)) +request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) +request_percpu_irq(unsigned int irq, irq_handler_t handler, + const char *devname, void *percpu_dev_id) +{ + return __request_percpu_irq(irq, handler, 0, + devname, percpu_dev_id); +} + +extern int __attribute__((__warn_unused_result__)) +request_percpu_nmi(unsigned int irq, irq_handler_t handler, + const char *devname, void *dev); + +extern const void *free_irq(unsigned int, void *); +extern void free_percpu_irq(unsigned int, void 
*); + +extern const void *free_nmi(unsigned int irq, void *dev_id); +extern void free_percpu_nmi(unsigned int irq, void *percpu_dev_id); + +struct device; + +extern int __attribute__((__warn_unused_result__)) +devm_request_threaded_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, irq_handler_t thread_fn, + unsigned long irqflags, const char *devname, + void *dev_id); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __attribute__((__warn_unused_result__)) +devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +{ + return devm_request_threaded_irq(dev, irq, handler, ((void *)0), irqflags, + devname, dev_id); +} + +extern int __attribute__((__warn_unused_result__)) +devm_request_any_context_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, unsigned long irqflags, + const char *devname, void *dev_id); + +extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); +# 235 "./include/linux/interrupt.h" +extern void disable_irq_nosync(unsigned int irq); +extern bool disable_hardirq(unsigned int irq); +extern void disable_irq(unsigned int irq); +extern void disable_percpu_irq(unsigned int irq); +extern void enable_irq(unsigned int irq); +extern void enable_percpu_irq(unsigned int irq, unsigned int type); +extern bool irq_percpu_is_enabled(unsigned int irq); +extern void irq_wake_thread(unsigned int irq, void *dev_id); + +extern void disable_nmi_nosync(unsigned int irq); +extern void disable_percpu_nmi(unsigned int irq); +extern void enable_nmi(unsigned int irq); +extern void enable_percpu_nmi(unsigned int irq, unsigned int type); +extern int prepare_percpu_nmi(unsigned int irq); +extern void teardown_percpu_nmi(unsigned int irq); + +extern int irq_inject_interrupt(unsigned int irq); + + +extern void suspend_device_irqs(void); +extern void resume_device_irqs(void); +extern void rearm_wake_irq(unsigned int irq); +# 270 "./include/linux/interrupt.h" +struct irq_affinity_notify { + unsigned int irq; + struct kref kref; + struct work_struct work; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); +}; +# 294 "./include/linux/interrupt.h" +struct irq_affinity { + unsigned int pre_vectors; + unsigned int post_vectors; + unsigned int nr_sets; + unsigned int set_size[4]; + void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); + void *priv; +}; + + + + + + +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed : 1; +}; + + + +extern cpumask_var_t irq_default_affinity; + + +extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, + bool force); +# 328 "./include/linux/interrupt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, false); +} +# 345 "./include/linux/interrupt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return __irq_set_affinity(irq, cpumask, true); +} + +extern int irq_can_set_affinity(unsigned int irq); +extern int irq_select_affinity(unsigned int irq); + +extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); + +extern int 
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); + +struct irq_affinity_desc * +irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd); + +unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, + const struct irq_affinity *affd); +# 422 "./include/linux/interrupt.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void disable_irq_nosync_lockdep(unsigned int irq) +{ + disable_irq_nosync(irq); + + do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) +{ + disable_irq_nosync(irq); + + do { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); *flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void disable_irq_lockdep(unsigned int irq) +{ + disable_irq(irq); + + do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void enable_irq_lockdep(unsigned int irq) +{ + + do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); + + enable_irq(irq); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) +{ + + do { if (({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); })) { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(*flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(*flags); } while (0); } } while (0); + + enable_irq(irq); +} + + +extern int irq_set_irq_wake(unsigned int irq, unsigned int on); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int enable_irq_wake(unsigned int irq) +{ + return irq_set_irq_wake(irq, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int disable_irq_wake(unsigned int irq) +{ + return irq_set_irq_wake(irq, 0); +} + + + + +enum irqchip_irq_state { + IRQCHIP_STATE_PENDING, + IRQCHIP_STATE_ACTIVE, + IRQCHIP_STATE_MASKED, + IRQCHIP_STATE_LINE_LEVEL, +}; + +extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, + bool *state); +extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, + bool state); + + + + + +extern bool force_irqthreads; +# 528 "./include/linux/interrupt.h" +enum +{ + HI_SOFTIRQ=0, + TIMER_SOFTIRQ, + NET_TX_SOFTIRQ, + NET_RX_SOFTIRQ, + BLOCK_SOFTIRQ, + IRQ_POLL_SOFTIRQ, + TASKLET_SOFTIRQ, + SCHED_SOFTIRQ, + HRTIMER_SOFTIRQ, + RCU_SOFTIRQ, + + NR_SOFTIRQS +}; + + + + + + +extern const char * const softirq_to_name[NR_SOFTIRQS]; + + + + + +struct softirq_action +{ + void (*action)(struct softirq_action *); +}; + + void do_softirq(void); + void __do_softirq(void); + + +void 
do_softirq_own_stack(void); + + + + + + + +extern void open_softirq(int nr, void (*action)(struct softirq_action *)); +extern void softirq_init(void); +extern void __raise_softirq_irqoff(unsigned int nr); + +extern void raise_softirq_irqoff(unsigned int nr); +extern void raise_softirq(unsigned int nr); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_ksoftirqd; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) ksoftirqd; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct task_struct *this_cpu_ksoftirqd(void) +{ + return ({ typeof(ksoftirqd) pscr_ret__; do { const void *__vpp_verify = (typeof((&(ksoftirqd)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(ksoftirqd)) { case 1: pscr_ret__ = ({ typeof(ksoftirqd) pfo_ret__; switch (sizeof(ksoftirqd)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (ksoftirqd)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(ksoftirqd) pfo_ret__; switch (sizeof(ksoftirqd)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (ksoftirqd)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(ksoftirqd) pfo_ret__; switch (sizeof(ksoftirqd)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (ksoftirqd)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(ksoftirqd) pfo_ret__; switch (sizeof(ksoftirqd)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (ksoftirqd)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (ksoftirqd)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); +} +# 606 "./include/linux/interrupt.h" +struct tasklet_struct +{ + struct tasklet_struct *next; + unsigned long state; + atomic_t count; + void (*func)(unsigned long); + unsigned long data; +}; +# 622 "./include/linux/interrupt.h" +enum +{ + TASKLET_STATE_SCHED, + TASKLET_STATE_RUN +}; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int tasklet_trylock(struct tasklet_struct *t) +{ + return 
!test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void tasklet_unlock(struct tasklet_struct *t) +{ + do { } while (0); + clear_bit(TASKLET_STATE_RUN, &(t)->state); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void tasklet_unlock_wait(struct tasklet_struct *t) +{ + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { __asm__ __volatile__("": : :"memory"); } +} + + + + + + +extern void __tasklet_schedule(struct tasklet_struct *t); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void tasklet_schedule(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_schedule(t); +} + +extern void __tasklet_hi_schedule(struct tasklet_struct *t); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void tasklet_hi_schedule(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_hi_schedule(t); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void tasklet_disable_nosync(struct tasklet_struct *t) +{ + atomic_inc(&t->count); + do { } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void tasklet_disable(struct tasklet_struct *t) +{ + tasklet_disable_nosync(t); + tasklet_unlock_wait(t); + asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc"); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void tasklet_enable(struct tasklet_struct *t) +{ + do { } while (0); + atomic_dec(&t->count); +} + +extern void tasklet_kill(struct tasklet_struct *t); +extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); +extern void tasklet_init(struct tasklet_struct *t, + void (*func)(unsigned long), unsigned long data); +# 732 "./include/linux/interrupt.h" +extern unsigned long probe_irq_on(void); +extern int probe_irq_off(unsigned long); +extern unsigned int probe_irq_mask(unsigned long); + + + + +extern void init_irq_proc(void); +# 752 "./include/linux/interrupt.h" +struct seq_file; +int show_interrupts(struct seq_file *p, void *v); +int arch_show_interrupts(struct seq_file *p, int prec); + +extern int early_irq_init(void); +extern int arch_probe_nr_irqs(void); +extern int arch_early_irq_init(void); +# 10 "./include/linux/kernel_stat.h" 2 +# 20 "./include/linux/kernel_stat.h" +enum cpu_usage_stat { + CPUTIME_USER, + CPUTIME_NICE, + CPUTIME_SYSTEM, + CPUTIME_SOFTIRQ, + CPUTIME_IRQ, + CPUTIME_IDLE, + CPUTIME_IOWAIT, + CPUTIME_STEAL, + CPUTIME_GUEST, + CPUTIME_GUEST_NICE, + NR_STATS, +}; + +struct kernel_cpustat { + u64 cpustat[NR_STATS]; +}; + +struct kernel_stat { + unsigned long irqs_sum; + unsigned int softirqs[NR_SOFTIRQS]; +}; + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_kstat; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct kernel_stat) kstat; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_kernel_cpustat; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct kernel_cpustat) kernel_cpustat; + + + + + + + +extern unsigned long long nr_context_switches(void); + +extern unsigned int 
kstat_irqs_cpu(unsigned int irq, int cpu); +extern void kstat_incr_irq_this_cpu(unsigned int irq); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kstat_incr_softirqs_this_cpu(unsigned int irq) +{ + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(kstat.softirqs[irq])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(kstat.softirqs[irq])) { case 1: do { typedef typeof((kstat.softirqs[irq])) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((kstat.softirqs[irq]))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((kstat.softirqs[irq])) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((kstat.softirqs[irq]))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((kstat.softirqs[irq])) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((kstat.softirqs[irq]))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((kstat.softirqs[irq])) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((kstat.softirqs[irq]))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq]))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((kstat.softirqs[irq])) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) +{ + return (*({ do { const void *__vpp_verify = (typeof((&(kstat)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(kstat)))) *)((&(kstat))))); (typeof((typeof(*((&(kstat)))) *)((&(kstat))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })).softirqs[irq]; +} + + + + +extern unsigned int kstat_irqs(unsigned int irq); +extern unsigned int kstat_irqs_usr(unsigned int irq); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) unsigned int kstat_cpu_irqs_sum(unsigned int cpu) +{ + return (*({ do { const void *__vpp_verify = (typeof((&(kstat)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(kstat)))) *)((&(kstat))))); (typeof((typeof(*((&(kstat)))) *)((&(kstat))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })).irqs_sum; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 kcpustat_field(struct kernel_cpustat *kcpustat, + enum cpu_usage_stat usage, int cpu) +{ + return kcpustat->cpustat[usage]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu) +{ + *dst = (*({ do { const void *__vpp_verify = (typeof((&(kernel_cpustat)) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(kernel_cpustat)))) *)((&(kernel_cpustat))))); (typeof((typeof(*((&(kernel_cpustat)))) *)((&(kernel_cpustat))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); +} + + + +extern void account_user_time(struct task_struct *, u64); +extern void account_guest_time(struct task_struct *, u64); +extern void account_system_time(struct task_struct *, int, u64); +extern void account_system_index_time(struct task_struct *, u64, + enum cpu_usage_stat); +extern void account_steal_time(u64); +extern void account_idle_time(u64); + + + + + + + +extern void account_process_tick(struct task_struct *, int user); + + +extern void account_idle_ticks(unsigned long ticks); +# 27 "./include/linux/cgroup.h" 2 + +# 1 "./include/linux/cgroup-defs.h" 1 +# 20 "./include/linux/cgroup-defs.h" +# 1 "./include/linux/u64_stats_sync.h" 1 +# 68 "./include/linux/u64_stats_sync.h" +struct u64_stats_sync { + + + +}; + + +# 1 "./arch/x86/include/asm/local64.h" 1 +# 76 "./include/linux/u64_stats_sync.h" 2 + +typedef struct { + local64_t v; +} u64_stats_t ; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 u64_stats_read(const u64_stats_t *p) +{ + return atomic_long_read(&(&(&p->v)->a)->a); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void u64_stats_add(u64_stats_t *p, unsigned long val) +{ + local_add((val),(&(&p->v)->a)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void u64_stats_inc(u64_stats_t *p) +{ + local_inc(&(&p->v)->a); +} +# 118 "./include/linux/u64_stats_sync.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void u64_stats_init(struct u64_stats_sync *syncp) +{ + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void u64_stats_update_begin(struct u64_stats_sync *syncp) +{ + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void u64_stats_update_end(struct u64_stats_sync *syncp) +{ + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) +{ + unsigned long flags = 0; + + + + + + return flags; +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, + unsigned long flags) +{ + + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +{ + + + + return 0; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +{ + + + + return __u64_stats_fetch_begin(syncp); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, + unsigned int start) +{ + + + + return false; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, + unsigned int start) +{ + + + + return __u64_stats_fetch_retry(syncp, start); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) +{ + + + + return __u64_stats_fetch_begin(syncp); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, + unsigned int start) +{ + + + + return __u64_stats_fetch_retry(syncp, start); +} +# 21 "./include/linux/cgroup-defs.h" 2 + +# 1 "./include/linux/bpf-cgroup.h" 1 + + + + +# 1 "./include/linux/bpf.h" 1 + + + + + + +# 1 "./include/uapi/linux/bpf.h" 1 +# 12 "./include/uapi/linux/bpf.h" +# 1 "./include/uapi/linux/bpf_common.h" 1 +# 13 "./include/uapi/linux/bpf.h" 2 +# 47 "./include/uapi/linux/bpf.h" +enum { + BPF_REG_0 = 0, + BPF_REG_1, + BPF_REG_2, + BPF_REG_3, + BPF_REG_4, + BPF_REG_5, + BPF_REG_6, + BPF_REG_7, + BPF_REG_8, + BPF_REG_9, + BPF_REG_10, + __MAX_BPF_REG, +}; + + + + +struct bpf_insn { + __u8 code; + __u8 dst_reg:4; + __u8 src_reg:4; + __s16 off; + __s32 imm; +}; + + +struct bpf_lpm_trie_key { + __u32 prefixlen; + __u8 data[0]; +}; + +struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; +}; + + +enum bpf_cmd { + BPF_MAP_CREATE, + BPF_MAP_LOOKUP_ELEM, + BPF_MAP_UPDATE_ELEM, + BPF_MAP_DELETE_ELEM, + BPF_MAP_GET_NEXT_KEY, + BPF_PROG_LOAD, + BPF_OBJ_PIN, + BPF_OBJ_GET, + BPF_PROG_ATTACH, + BPF_PROG_DETACH, + BPF_PROG_TEST_RUN, + BPF_PROG_GET_NEXT_ID, + BPF_MAP_GET_NEXT_ID, + BPF_PROG_GET_FD_BY_ID, + BPF_MAP_GET_FD_BY_ID, + BPF_OBJ_GET_INFO_BY_FD, + BPF_PROG_QUERY, + BPF_RAW_TRACEPOINT_OPEN, + BPF_BTF_LOAD, + BPF_BTF_GET_FD_BY_ID, + BPF_TASK_FD_QUERY, + BPF_MAP_LOOKUP_AND_DELETE_ELEM, + BPF_MAP_FREEZE, + BPF_BTF_GET_NEXT_ID, + BPF_MAP_LOOKUP_BATCH, + BPF_MAP_LOOKUP_AND_DELETE_BATCH, + BPF_MAP_UPDATE_BATCH, + BPF_MAP_DELETE_BATCH, + BPF_LINK_CREATE, + BPF_LINK_UPDATE, + BPF_LINK_GET_FD_BY_ID, + BPF_LINK_GET_NEXT_ID, + BPF_ENABLE_STATS, + BPF_ITER_CREATE, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC, + BPF_MAP_TYPE_HASH, + BPF_MAP_TYPE_ARRAY, + BPF_MAP_TYPE_PROG_ARRAY, + BPF_MAP_TYPE_PERF_EVENT_ARRAY, + BPF_MAP_TYPE_PERCPU_HASH, + BPF_MAP_TYPE_PERCPU_ARRAY, + BPF_MAP_TYPE_STACK_TRACE, + BPF_MAP_TYPE_CGROUP_ARRAY, + BPF_MAP_TYPE_LRU_HASH, + BPF_MAP_TYPE_LRU_PERCPU_HASH, + 
BPF_MAP_TYPE_LPM_TRIE, + BPF_MAP_TYPE_ARRAY_OF_MAPS, + BPF_MAP_TYPE_HASH_OF_MAPS, + BPF_MAP_TYPE_DEVMAP, + BPF_MAP_TYPE_SOCKMAP, + BPF_MAP_TYPE_CPUMAP, + BPF_MAP_TYPE_XSKMAP, + BPF_MAP_TYPE_SOCKHASH, + BPF_MAP_TYPE_CGROUP_STORAGE, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + BPF_MAP_TYPE_QUEUE, + BPF_MAP_TYPE_STACK, + BPF_MAP_TYPE_SK_STORAGE, + BPF_MAP_TYPE_DEVMAP_HASH, + BPF_MAP_TYPE_STRUCT_OPS, + BPF_MAP_TYPE_RINGBUF, +}; +# 161 "./include/uapi/linux/bpf.h" +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC, + BPF_PROG_TYPE_SOCKET_FILTER, + BPF_PROG_TYPE_KPROBE, + BPF_PROG_TYPE_SCHED_CLS, + BPF_PROG_TYPE_SCHED_ACT, + BPF_PROG_TYPE_TRACEPOINT, + BPF_PROG_TYPE_XDP, + BPF_PROG_TYPE_PERF_EVENT, + BPF_PROG_TYPE_CGROUP_SKB, + BPF_PROG_TYPE_CGROUP_SOCK, + BPF_PROG_TYPE_LWT_IN, + BPF_PROG_TYPE_LWT_OUT, + BPF_PROG_TYPE_LWT_XMIT, + BPF_PROG_TYPE_SOCK_OPS, + BPF_PROG_TYPE_SK_SKB, + BPF_PROG_TYPE_CGROUP_DEVICE, + BPF_PROG_TYPE_SK_MSG, + BPF_PROG_TYPE_RAW_TRACEPOINT, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR, + BPF_PROG_TYPE_LWT_SEG6LOCAL, + BPF_PROG_TYPE_LIRC_MODE2, + BPF_PROG_TYPE_SK_REUSEPORT, + BPF_PROG_TYPE_FLOW_DISSECTOR, + BPF_PROG_TYPE_CGROUP_SYSCTL, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, + BPF_PROG_TYPE_CGROUP_SOCKOPT, + BPF_PROG_TYPE_TRACING, + BPF_PROG_TYPE_STRUCT_OPS, + BPF_PROG_TYPE_EXT, + BPF_PROG_TYPE_LSM, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS, + BPF_CGROUP_INET_EGRESS, + BPF_CGROUP_INET_SOCK_CREATE, + BPF_CGROUP_SOCK_OPS, + BPF_SK_SKB_STREAM_PARSER, + BPF_SK_SKB_STREAM_VERDICT, + BPF_CGROUP_DEVICE, + BPF_SK_MSG_VERDICT, + BPF_CGROUP_INET4_BIND, + BPF_CGROUP_INET6_BIND, + BPF_CGROUP_INET4_CONNECT, + BPF_CGROUP_INET6_CONNECT, + BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_UDP4_SENDMSG, + BPF_CGROUP_UDP6_SENDMSG, + BPF_LIRC_MODE2, + BPF_FLOW_DISSECTOR, + BPF_CGROUP_SYSCTL, + BPF_CGROUP_UDP4_RECVMSG, + BPF_CGROUP_UDP6_RECVMSG, + BPF_CGROUP_GETSOCKOPT, + BPF_CGROUP_SETSOCKOPT, + BPF_TRACE_RAW_TP, + BPF_TRACE_FENTRY, + BPF_TRACE_FEXIT, + BPF_MODIFY_RETURN, + BPF_LSM_MAC, + BPF_TRACE_ITER, + BPF_CGROUP_INET4_GETPEERNAME, + BPF_CGROUP_INET6_GETPEERNAME, + BPF_CGROUP_INET4_GETSOCKNAME, + BPF_CGROUP_INET6_GETSOCKNAME, + BPF_XDP_DEVMAP, + __MAX_BPF_ATTACH_TYPE +}; + + + +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + + MAX_BPF_LINK_TYPE, +}; +# 355 "./include/uapi/linux/bpf.h" +enum { + BPF_ANY = 0, + BPF_NOEXIST = 1, + BPF_EXIST = 2, + BPF_F_LOCK = 4, +}; + + +enum { + BPF_F_NO_PREALLOC = (1U << 0), + + + + + + + BPF_F_NO_COMMON_LRU = (1U << 1), + + BPF_F_NUMA_NODE = (1U << 2), + + + BPF_F_RDONLY = (1U << 3), + BPF_F_WRONLY = (1U << 4), + + + BPF_F_STACK_BUILD_ID = (1U << 5), + + + BPF_F_ZERO_SEED = (1U << 6), + + + BPF_F_RDONLY_PROG = (1U << 7), + BPF_F_WRONLY_PROG = (1U << 8), + + + BPF_F_CLONE = (1U << 9), + + + BPF_F_MMAPABLE = (1U << 10), +}; +# 405 "./include/uapi/linux/bpf.h" +enum bpf_stats_type { + + BPF_STATS_RUN_TIME = 0, +}; + +enum bpf_stack_build_id_status { + + BPF_STACK_BUILD_ID_EMPTY = 0, + + BPF_STACK_BUILD_ID_VALID = 1, + + BPF_STACK_BUILD_ID_IP = 2, +}; + + +struct bpf_stack_build_id { + __s32 status; + unsigned char build_id[20]; + union { + __u64 offset; + __u64 ip; + }; +}; + + + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + + + __u32 inner_map_fd; + __u32 numa_node; + + + char 
map_name[16U]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + + + + }; + + struct { + __u32 map_fd; + __u64 __attribute__((aligned(8))) key; + union { + __u64 __attribute__((aligned(8))) value; + __u64 __attribute__((aligned(8))) next_key; + }; + __u64 flags; + }; + + struct { + __u64 __attribute__((aligned(8))) in_batch; + + + __u64 __attribute__((aligned(8))) out_batch; + __u64 __attribute__((aligned(8))) keys; + __u64 __attribute__((aligned(8))) values; + __u32 count; + + + + + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 __attribute__((aligned(8))) insns; + __u64 __attribute__((aligned(8))) license; + __u32 log_level; + __u32 log_size; + __u64 __attribute__((aligned(8))) log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[16U]; + __u32 prog_ifindex; + + + + + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 __attribute__((aligned(8))) func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 __attribute__((aligned(8))) line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + __u32 attach_prog_fd; + }; + + struct { + __u64 __attribute__((aligned(8))) pathname; + __u32 bpf_fd; + __u32 file_flags; + }; + + struct { + __u32 target_fd; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + + + + }; + + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + + + + __u64 __attribute__((aligned(8))) data_in; + __u64 __attribute__((aligned(8))) data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + + + + __u64 __attribute__((aligned(8))) ctx_in; + __u64 __attribute__((aligned(8))) ctx_out; + } test; + + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 __attribute__((aligned(8))) info; + } info; + + struct { + __u32 target_fd; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 __attribute__((aligned(8))) prog_ids; + __u32 prog_cnt; + } query; + + struct { + __u64 name; + __u32 prog_fd; + } raw_tracepoint; + + struct { + __u64 __attribute__((aligned(8))) btf; + __u64 __attribute__((aligned(8))) btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + }; + + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 __attribute__((aligned(8))) buf; + + + + + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + + struct { + __u32 prog_fd; + __u32 target_fd; + __u32 attach_type; + __u32 flags; + } link_create; + + struct { + __u32 link_fd; + + __u32 new_prog_fd; + __u32 flags; + + + __u32 old_prog_fd; + } link_update; + + struct { + __u32 type; + } enable_stats; + + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + +} __attribute__((aligned(8))); +# 3398 "./include/uapi/linux/bpf.h" +enum bpf_func_id { + BPF_FUNC_unspec, BPF_FUNC_map_lookup_elem, BPF_FUNC_map_update_elem, BPF_FUNC_map_delete_elem, BPF_FUNC_probe_read, BPF_FUNC_ktime_get_ns, BPF_FUNC_trace_printk, BPF_FUNC_get_prandom_u32, BPF_FUNC_get_smp_processor_id, BPF_FUNC_skb_store_bytes, BPF_FUNC_l3_csum_replace, BPF_FUNC_l4_csum_replace, BPF_FUNC_tail_call, BPF_FUNC_clone_redirect, BPF_FUNC_get_current_pid_tgid, BPF_FUNC_get_current_uid_gid, 
BPF_FUNC_get_current_comm, BPF_FUNC_get_cgroup_classid, BPF_FUNC_skb_vlan_push, BPF_FUNC_skb_vlan_pop, BPF_FUNC_skb_get_tunnel_key, BPF_FUNC_skb_set_tunnel_key, BPF_FUNC_perf_event_read, BPF_FUNC_redirect, BPF_FUNC_get_route_realm, BPF_FUNC_perf_event_output, BPF_FUNC_skb_load_bytes, BPF_FUNC_get_stackid, BPF_FUNC_csum_diff, BPF_FUNC_skb_get_tunnel_opt, BPF_FUNC_skb_set_tunnel_opt, BPF_FUNC_skb_change_proto, BPF_FUNC_skb_change_type, BPF_FUNC_skb_under_cgroup, BPF_FUNC_get_hash_recalc, BPF_FUNC_get_current_task, BPF_FUNC_probe_write_user, BPF_FUNC_current_task_under_cgroup, BPF_FUNC_skb_change_tail, BPF_FUNC_skb_pull_data, BPF_FUNC_csum_update, BPF_FUNC_set_hash_invalid, BPF_FUNC_get_numa_node_id, BPF_FUNC_skb_change_head, BPF_FUNC_xdp_adjust_head, BPF_FUNC_probe_read_str, BPF_FUNC_get_socket_cookie, BPF_FUNC_get_socket_uid, BPF_FUNC_set_hash, BPF_FUNC_setsockopt, BPF_FUNC_skb_adjust_room, BPF_FUNC_redirect_map, BPF_FUNC_sk_redirect_map, BPF_FUNC_sock_map_update, BPF_FUNC_xdp_adjust_meta, BPF_FUNC_perf_event_read_value, BPF_FUNC_perf_prog_read_value, BPF_FUNC_getsockopt, BPF_FUNC_override_return, BPF_FUNC_sock_ops_cb_flags_set, BPF_FUNC_msg_redirect_map, BPF_FUNC_msg_apply_bytes, BPF_FUNC_msg_cork_bytes, BPF_FUNC_msg_pull_data, BPF_FUNC_bind, BPF_FUNC_xdp_adjust_tail, BPF_FUNC_skb_get_xfrm_state, BPF_FUNC_get_stack, BPF_FUNC_skb_load_bytes_relative, BPF_FUNC_fib_lookup, BPF_FUNC_sock_hash_update, BPF_FUNC_msg_redirect_hash, BPF_FUNC_sk_redirect_hash, BPF_FUNC_lwt_push_encap, BPF_FUNC_lwt_seg6_store_bytes, BPF_FUNC_lwt_seg6_adjust_srh, BPF_FUNC_lwt_seg6_action, BPF_FUNC_rc_repeat, BPF_FUNC_rc_keydown, BPF_FUNC_skb_cgroup_id, BPF_FUNC_get_current_cgroup_id, BPF_FUNC_get_local_storage, BPF_FUNC_sk_select_reuseport, BPF_FUNC_skb_ancestor_cgroup_id, BPF_FUNC_sk_lookup_tcp, BPF_FUNC_sk_lookup_udp, BPF_FUNC_sk_release, BPF_FUNC_map_push_elem, BPF_FUNC_map_pop_elem, BPF_FUNC_map_peek_elem, BPF_FUNC_msg_push_data, BPF_FUNC_msg_pop_data, BPF_FUNC_rc_pointer_rel, BPF_FUNC_spin_lock, BPF_FUNC_spin_unlock, BPF_FUNC_sk_fullsock, BPF_FUNC_tcp_sock, BPF_FUNC_skb_ecn_set_ce, BPF_FUNC_get_listener_sock, BPF_FUNC_skc_lookup_tcp, BPF_FUNC_tcp_check_syncookie, BPF_FUNC_sysctl_get_name, BPF_FUNC_sysctl_get_current_value, BPF_FUNC_sysctl_get_new_value, BPF_FUNC_sysctl_set_new_value, BPF_FUNC_strtol, BPF_FUNC_strtoul, BPF_FUNC_sk_storage_get, BPF_FUNC_sk_storage_delete, BPF_FUNC_send_signal, BPF_FUNC_tcp_gen_syncookie, BPF_FUNC_skb_output, BPF_FUNC_probe_read_user, BPF_FUNC_probe_read_kernel, BPF_FUNC_probe_read_user_str, BPF_FUNC_probe_read_kernel_str, BPF_FUNC_tcp_send_ack, BPF_FUNC_send_signal_thread, BPF_FUNC_jiffies64, BPF_FUNC_read_branch_records, BPF_FUNC_get_ns_current_pid_tgid, BPF_FUNC_xdp_output, BPF_FUNC_get_netns_cookie, BPF_FUNC_get_current_ancestor_cgroup_id, BPF_FUNC_sk_assign, BPF_FUNC_ktime_get_boot_ns, BPF_FUNC_seq_printf, BPF_FUNC_seq_write, BPF_FUNC_sk_cgroup_id, BPF_FUNC_sk_ancestor_cgroup_id, BPF_FUNC_ringbuf_output, BPF_FUNC_ringbuf_reserve, BPF_FUNC_ringbuf_submit, BPF_FUNC_ringbuf_discard, BPF_FUNC_ringbuf_query, BPF_FUNC_csum_level, + __BPF_FUNC_MAX_ID, +}; + + + + + +enum { + BPF_F_RECOMPUTE_CSUM = (1ULL << 0), + BPF_F_INVALIDATE_HASH = (1ULL << 1), +}; + + + + +enum { + BPF_F_HDR_FIELD_MASK = 0xfULL, +}; + + +enum { + BPF_F_PSEUDO_HDR = (1ULL << 4), + BPF_F_MARK_MANGLED_0 = (1ULL << 5), + BPF_F_MARK_ENFORCE = (1ULL << 6), +}; + + +enum { + BPF_F_INGRESS = (1ULL << 0), +}; + + +enum { + BPF_F_TUNINFO_IPV6 = (1ULL << 0), +}; + + +enum { + BPF_F_SKIP_FIELD_MASK = 0xffULL, + 
BPF_F_USER_STACK = (1ULL << 8), + + BPF_F_FAST_STACK_CMP = (1ULL << 9), + BPF_F_REUSE_STACKID = (1ULL << 10), + + BPF_F_USER_BUILD_ID = (1ULL << 11), +}; + + +enum { + BPF_F_ZERO_CSUM_TX = (1ULL << 1), + BPF_F_DONT_FRAGMENT = (1ULL << 2), + BPF_F_SEQ_NUMBER = (1ULL << 3), +}; + + + + +enum { + BPF_F_INDEX_MASK = 0xffffffffULL, + BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, + + BPF_F_CTXLEN_MASK = (0xfffffULL << 32), +}; + + +enum { + BPF_F_CURRENT_NETNS = (-1L), +}; + + +enum { + BPF_CSUM_LEVEL_QUERY, + BPF_CSUM_LEVEL_INC, + BPF_CSUM_LEVEL_DEC, + BPF_CSUM_LEVEL_RESET, +}; + + +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), + BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), +}; + +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; + + + + + + +enum { + BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), +}; + + +enum { + BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0), +}; + + +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), +}; + + + + +enum { + BPF_RB_NO_WAKEUP = (1ULL << 0), + BPF_RB_FORCE_WAKEUP = (1ULL << 1), +}; + + +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + + +enum { + BPF_RINGBUF_BUSY_BIT = (1U << 31), + BPF_RINGBUF_DISCARD_BIT = (1U << 30), + BPF_RINGBUF_HDR_SZ = 8, +}; + + +enum bpf_adj_room_mode { + BPF_ADJ_ROOM_NET, + BPF_ADJ_ROOM_MAC, +}; + + +enum bpf_hdr_start_off { + BPF_HDR_START_MAC, + BPF_HDR_START_NET, +}; + + +enum bpf_lwt_encap_mode { + BPF_LWT_ENCAP_SEG6, + BPF_LWT_ENCAP_SEG6_INLINE, + BPF_LWT_ENCAP_IP, +}; +# 3562 "./include/uapi/linux/bpf.h" +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; + __u32 vlan_proto; + __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; + __u32 data; + __u32 data_end; + __u32 napi_id; + + + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + + + __u32 data_meta; + union { struct bpf_flow_keys * flow_keys; __u64 :64; } __attribute__((aligned(8))); + __u64 tstamp; + __u32 wire_len; + __u32 gso_segs; + union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8))); + __u32 gso_size; +}; + +struct bpf_tunnel_key { + __u32 tunnel_id; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; + __u8 tunnel_tos; + __u8 tunnel_ttl; + __u16 tunnel_ext; + __u32 tunnel_label; +}; + + + + +struct bpf_xfrm_state { + __u32 reqid; + __u32 spi; + __u16 family; + __u16 ext; + union { + __u32 remote_ipv4; + __u32 remote_ipv6[4]; + }; +}; +# 3634 "./include/uapi/linux/bpf.h" +enum bpf_ret_code { + BPF_OK = 0, + + BPF_DROP = 2, + + BPF_REDIRECT = 7, +# 3648 "./include/uapi/linux/bpf.h" + BPF_LWT_REROUTE = 128, +}; + +struct bpf_sock { + __u32 bound_dev_if; + __u32 family; + __u32 type; + __u32 protocol; + __u32 mark; + __u32 priority; + + __u32 src_ip4; + __u32 src_ip6[4]; + __u32 src_port; + __u32 dst_port; + __u32 dst_ip4; + __u32 dst_ip6[4]; + __u32 state; + __s32 rx_queue_mapping; +}; + +struct bpf_tcp_sock { + __u32 snd_cwnd; + __u32 srtt_us; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + 
__u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + + + __u32 data_segs_in; + + + __u32 segs_out; + + + __u32 data_segs_out; + + + __u32 lost_out; + __u32 sacked_out; + __u64 bytes_received; + + + + __u64 bytes_acked; + + + + __u32 dsack_dups; + + + __u32 delivered; + __u32 delivered_ce; + __u32 icsk_retransmits; +}; + +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + +struct bpf_xdp_sock { + __u32 queue_id; +}; +# 3742 "./include/uapi/linux/bpf.h" +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP, + XDP_PASS, + XDP_TX, + XDP_REDIRECT, +}; + + + + +struct xdp_md { + __u32 data; + __u32 data_end; + __u32 data_meta; + + __u32 ingress_ifindex; + __u32 rx_queue_index; + + __u32 egress_ifindex; +}; + + + + + + +struct bpf_devmap_val { + __u32 ifindex; + union { + int fd; + __u32 id; + } bpf_prog; +}; + +enum sk_action { + SK_DROP = 0, + SK_PASS, +}; + + + + +struct sk_msg_md { + union { void * data; __u64 :64; } __attribute__((aligned(8))); + union { void * data_end; __u64 :64; } __attribute__((aligned(8))); + + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 size; + + union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8))); +}; + +struct sk_reuseport_md { + + + + + union { void * data; __u64 :64; } __attribute__((aligned(8))); + + union { void * data_end; __u64 :64; } __attribute__((aligned(8))); + + + + + + + __u32 len; + + + + + __u32 eth_protocol; + __u32 ip_protocol; + __u32 bind_inany; + __u32 hash; +}; + + + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[8]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __u64 __attribute__((aligned(8))) jited_prog_insns; + __u64 __attribute__((aligned(8))) xlated_prog_insns; + __u64 load_time; + __u32 created_by_uid; + __u32 nr_map_ids; + __u64 __attribute__((aligned(8))) map_ids; + char name[16U]; + __u32 ifindex; + __u32 gpl_compatible:1; + __u32 :31; + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __u64 __attribute__((aligned(8))) jited_ksyms; + __u64 __attribute__((aligned(8))) jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __u64 __attribute__((aligned(8))) func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __u64 __attribute__((aligned(8))) line_info; + __u64 __attribute__((aligned(8))) jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __u64 __attribute__((aligned(8))) prog_tags; + __u64 run_time_ns; + __u64 run_cnt; +} __attribute__((aligned(8))); + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[16U]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; +} __attribute__((aligned(8))); + +struct bpf_btf_info { + __u64 __attribute__((aligned(8))) btf; + __u32 btf_size; + __u32 id; +} __attribute__((aligned(8))); + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + union { + struct { + __u64 __attribute__((aligned(8))) tp_name; + __u32 tp_name_len; + } raw_tracepoint; + struct { + __u32 attach_type; + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + } cgroup; + struct { + 
__u32 netns_ino; + __u32 attach_type; + } netns; + }; +} __attribute__((aligned(8))); + + + + + +struct bpf_sock_addr { + __u32 user_family; + __u32 user_ip4; + + + __u32 user_ip6[4]; + + + __u32 user_port; + + + __u32 family; + __u32 type; + __u32 protocol; + __u32 msg_src_ip4; + + + __u32 msg_src_ip6[4]; + + + union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8))); +}; + + + + + + + +struct bpf_sock_ops { + __u32 op; + union { + __u32 args[4]; + __u32 reply; + __u32 replylong[4]; + }; + __u32 family; + __u32 remote_ip4; + __u32 local_ip4; + __u32 remote_ip6[4]; + __u32 local_ip6[4]; + __u32 remote_port; + __u32 local_port; + __u32 is_fullsock; + + + + __u32 snd_cwnd; + __u32 srtt_us; + __u32 bpf_sock_ops_cb_flags; + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; + union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8))); +}; + + +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), + BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), + BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), + BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), + + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF, +}; + + + + +enum { + BPF_SOCK_OPS_VOID, + BPF_SOCK_OPS_TIMEOUT_INIT, + + + BPF_SOCK_OPS_RWND_INIT, + + + + BPF_SOCK_OPS_TCP_CONNECT_CB, + + + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, + + + + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, + + + + BPF_SOCK_OPS_NEEDS_ECN, + + + BPF_SOCK_OPS_BASE_RTT, + + + + + + + BPF_SOCK_OPS_RTO_CB, + + + + + BPF_SOCK_OPS_RETRANS_CB, + + + + + + BPF_SOCK_OPS_STATE_CB, + + + + BPF_SOCK_OPS_TCP_LISTEN_CB, + + + BPF_SOCK_OPS_RTT_CB, + +}; + + + + + + +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT, + BPF_TCP_SYN_RECV, + BPF_TCP_FIN_WAIT1, + BPF_TCP_FIN_WAIT2, + BPF_TCP_TIME_WAIT, + BPF_TCP_CLOSE, + BPF_TCP_CLOSE_WAIT, + BPF_TCP_LAST_ACK, + BPF_TCP_LISTEN, + BPF_TCP_CLOSING, + BPF_TCP_NEW_SYN_RECV, + + BPF_TCP_MAX_STATES +}; + +enum { + TCP_BPF_IW = 1001, + TCP_BPF_SNDCWND_CLAMP = 1002, +}; + +struct bpf_perf_event_value { + __u64 counter; + __u64 enabled; + __u64 running; +}; + +enum { + BPF_DEVCG_ACC_MKNOD = (1ULL << 0), + BPF_DEVCG_ACC_READ = (1ULL << 1), + BPF_DEVCG_ACC_WRITE = (1ULL << 2), +}; + +enum { + BPF_DEVCG_DEV_BLOCK = (1ULL << 0), + BPF_DEVCG_DEV_CHAR = (1ULL << 1), +}; + +struct bpf_cgroup_dev_ctx { + + __u32 access_type; + __u32 major; + __u32 minor; +}; + +struct bpf_raw_tracepoint_args { + __u64 args[0]; +}; + + + + +enum { + BPF_FIB_LOOKUP_DIRECT = (1U << 0), + BPF_FIB_LOOKUP_OUTPUT = (1U << 1), +}; + +enum { + BPF_FIB_LKUP_RET_SUCCESS, + BPF_FIB_LKUP_RET_BLACKHOLE, + BPF_FIB_LKUP_RET_UNREACHABLE, + BPF_FIB_LKUP_RET_PROHIBIT, + BPF_FIB_LKUP_RET_NOT_FWDED, + BPF_FIB_LKUP_RET_FWD_DISABLED, + BPF_FIB_LKUP_RET_UNSUPP_LWT, + BPF_FIB_LKUP_RET_NO_NEIGH, + BPF_FIB_LKUP_RET_FRAG_NEEDED, +}; + +struct bpf_fib_lookup { + + + + __u8 family; + + + __u8 l4_protocol; + __be16 sport; + __be16 dport; + + + __u16 tot_len; + + + + + __u32 ifindex; + + union { + + __u8 tos; + __be32 flowinfo; + + + __u32 rt_metric; + }; + + union { + __be32 ipv4_src; + __u32 ipv6_src[4]; + }; + + + + + + union { + __be32 ipv4_dst; + __u32 ipv6_dst[4]; + }; + + + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __u8 smac[6]; + __u8 dmac[6]; +}; + +enum 
bpf_task_fd_type { + BPF_FD_TYPE_RAW_TRACEPOINT, + BPF_FD_TYPE_TRACEPOINT, + BPF_FD_TYPE_KPROBE, + BPF_FD_TYPE_KRETPROBE, + BPF_FD_TYPE_UPROBE, + BPF_FD_TYPE_URETPROBE, +}; + +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), +}; + +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; + __u32 ipv6_dst[4]; + }; + }; + __u32 flags; + __be32 flow_label; +}; + +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + + + + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + +struct bpf_spin_lock { + __u32 val; +}; + +struct bpf_sysctl { + __u32 write; + + + __u32 file_pos; + + +}; + +struct bpf_sockopt { + union { struct bpf_sock * sk; __u64 :64; } __attribute__((aligned(8))); + union { void * optval; __u64 :64; } __attribute__((aligned(8))); + union { void * optval_end; __u64 :64; } __attribute__((aligned(8))); + + __s32 level; + __s32 optname; + __s32 optlen; + __s32 retval; +}; + +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; +# 8 "./include/linux/bpf.h" 2 + + +# 1 "./include/linux/file.h" 1 +# 13 "./include/linux/file.h" +struct file; + +extern void fput(struct file *); +extern void fput_many(struct file *, unsigned int); + +struct file_operations; +struct task_struct; +struct vfsmount; +struct dentry; +struct inode; +struct path; +extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *, + const char *, int flags, const struct file_operations *); +extern struct file *alloc_file_clone(struct file *, int flags, + const struct file_operations *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fput_light(struct file *file, int fput_needed) +{ + if (fput_needed) + fput(file); +} + +struct fd { + struct file *file; + unsigned int flags; +}; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fdput(struct fd fd) +{ + if (fd.flags & 1) + fput(fd.file); +} + +extern struct file *fget(unsigned int fd); +extern struct file *fget_many(unsigned int fd, unsigned int refs); +extern struct file *fget_raw(unsigned int fd); +extern struct file *fget_task(struct task_struct *task, unsigned int fd); +extern unsigned long __fdget(unsigned int fd); +extern unsigned long __fdget_raw(unsigned int fd); +extern unsigned long __fdget_pos(unsigned int fd); +extern void __f_unlock_pos(struct file *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct fd __to_fd(unsigned long v) +{ + return (struct fd){(struct file *)(v & ~3),v & 3}; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct fd fdget(unsigned int fd) +{ + return __to_fd(__fdget(fd)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct fd fdget_raw(unsigned int fd) +{ + return __to_fd(__fdget_raw(fd)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct fd fdget_pos(int fd) +{ + return 
__to_fd(__fdget_pos(fd)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fdput_pos(struct fd f) +{ + if (f.flags & 2) + __f_unlock_pos(f.file); + fdput(f); +} + +extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); +extern int replace_fd(unsigned fd, struct file *file, unsigned flags); +extern void set_close_on_exec(unsigned int fd, int flag); +extern bool get_close_on_exec(unsigned int fd); +extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile); +extern int get_unused_fd_flags(unsigned flags); +extern void put_unused_fd(unsigned int fd); + +extern void fd_install(unsigned int fd, struct file *file); + +extern void flush_delayed_fput(void); +extern void __fput_sync(struct file *); + +extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max; +# 11 "./include/linux/bpf.h" 2 +# 24 "./include/linux/bpf.h" +struct bpf_verifier_env; +struct bpf_verifier_log; +struct perf_event; +struct bpf_prog; +struct bpf_prog_aux; +struct bpf_map; +struct sock; +struct seq_file; +struct btf; +struct btf_type; +struct exception_table_entry; +struct seq_operations; + +extern struct idr btf_idr; +extern spinlock_t btf_idr_lock; + + +struct bpf_map_ops { + + int (*map_alloc_check)(union bpf_attr *attr); + struct bpf_map *(*map_alloc)(union bpf_attr *attr); + void (*map_release)(struct bpf_map *map, struct file *map_file); + void (*map_free)(struct bpf_map *map); + int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); + void (*map_release_uref)(struct bpf_map *map); + void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); + int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, + union bpf_attr *uattr); + int (*map_lookup_and_delete_batch)(struct bpf_map *map, + const union bpf_attr *attr, + union bpf_attr *uattr); + int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, + union bpf_attr *uattr); + int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, + union bpf_attr *uattr); + + + void *(*map_lookup_elem)(struct bpf_map *map, void *key); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_map *map, void *key); + int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); + int (*map_pop_elem)(struct bpf_map *map, void *value); + int (*map_peek_elem)(struct bpf_map *map, void *value); + + + void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, + int fd); + void (*map_fd_put_ptr)(void *ptr); + u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); + u32 (*map_fd_sys_lookup_elem)(void *ptr); + void (*map_seq_show_elem)(struct bpf_map *map, void *key, + struct seq_file *m); + int (*map_check_btf)(const struct bpf_map *map, + const struct btf *btf, + const struct btf_type *key_type, + const struct btf_type *value_type); + + + int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); + void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); + void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, + struct bpf_prog *new); + + + int (*map_direct_value_addr)(const struct bpf_map *map, + u64 *imm, u32 off); + int (*map_direct_value_meta)(const struct bpf_map *map, + u64 imm, u32 *off); + int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); + __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, + struct poll_table_struct *pts); +}; + +struct 
bpf_map_memory { + u32 pages; + struct user_struct *user; +}; + +struct bpf_map { + + + + const struct bpf_map_ops *ops __attribute__((__aligned__((1 << (6))))); + struct bpf_map *inner_map_meta; + + void *security; + + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; + u32 map_flags; + int spin_lock_off; + u32 id; + int numa_node; + u32 btf_key_type_id; + u32 btf_value_type_id; + struct btf *btf; + struct bpf_map_memory memory; + char name[16U]; + u32 btf_vmlinux_value_type_id; + bool bypass_spec_v1; + bool frozen; + + + + + + atomic64_t refcnt __attribute__((__aligned__((1 << (6))))); + atomic64_t usercnt; + struct work_struct work; + struct mutex freeze_mutex; + u64 writecnt; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool map_value_has_spin_lock(const struct bpf_map *map) +{ + return map->spin_lock_off >= 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_and_init_map_lock(struct bpf_map *map, void *dst) +{ + if (__builtin_expect(!!(!map_value_has_spin_lock(map)), 1)) + return; + *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = + (struct bpf_spin_lock){}; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void copy_map_value(struct bpf_map *map, void *dst, void *src) +{ + if (__builtin_expect(!!(map_value_has_spin_lock(map)), 0)) { + u32 off = map->spin_lock_off; + + memcpy(dst, src, off); + memcpy(dst + off + sizeof(struct bpf_spin_lock), + src + off + sizeof(struct bpf_spin_lock), + map->value_size - off - sizeof(struct bpf_spin_lock)); + } else { + memcpy(dst, src, map->value_size); + } +} +void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, + bool lock_src); +int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); + +struct bpf_offload_dev; +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *map, + void *key, void *next_key); + int (*map_lookup_elem)(struct bpf_offloaded_map *map, + void *key, void *value); + int (*map_update_elem)(struct bpf_offloaded_map *map, + void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) +{ + return ({ void *__mptr = (void *)(map); do { extern void __compiletime_assert_1013(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(map)), typeof(((struct bpf_offloaded_map *)0)->map)) && !__builtin_types_compatible_p(typeof(*(map)), typeof(void))))) __compiletime_assert_1013(); } while (0); ((struct bpf_offloaded_map *)(__mptr - __builtin_offsetof(struct bpf_offloaded_map, map))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_map_offload_neutral(const struct bpf_map *map) +{ + return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool 
bpf_map_support_seq_show(const struct bpf_map *map) +{ + return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && + map->ops->map_seq_show_elem; +} + +int map_check_no_btf(const struct bpf_map *map, + const struct btf *btf, + const struct btf_type *key_type, + const struct btf_type *value_type); + +extern const struct bpf_map_ops bpf_map_offload_ops; + + +enum bpf_arg_type { + ARG_DONTCARE = 0, + + + + + ARG_CONST_MAP_PTR, + ARG_PTR_TO_MAP_KEY, + ARG_PTR_TO_MAP_VALUE, + ARG_PTR_TO_UNINIT_MAP_VALUE, + ARG_PTR_TO_MAP_VALUE_OR_NULL, + + + + + ARG_PTR_TO_MEM, + ARG_PTR_TO_MEM_OR_NULL, + ARG_PTR_TO_UNINIT_MEM, + + + + + ARG_CONST_SIZE, + ARG_CONST_SIZE_OR_ZERO, + + ARG_PTR_TO_CTX, + ARG_PTR_TO_CTX_OR_NULL, + ARG_ANYTHING, + ARG_PTR_TO_SPIN_LOCK, + ARG_PTR_TO_SOCK_COMMON, + ARG_PTR_TO_INT, + ARG_PTR_TO_LONG, + ARG_PTR_TO_SOCKET, + ARG_PTR_TO_BTF_ID, + ARG_PTR_TO_ALLOC_MEM, + ARG_PTR_TO_ALLOC_MEM_OR_NULL, + ARG_CONST_ALLOC_SIZE_OR_ZERO, +}; + + +enum bpf_return_type { + RET_INTEGER, + RET_VOID, + RET_PTR_TO_MAP_VALUE, + RET_PTR_TO_MAP_VALUE_OR_NULL, + RET_PTR_TO_SOCKET_OR_NULL, + RET_PTR_TO_TCP_SOCK_OR_NULL, + RET_PTR_TO_SOCK_COMMON_OR_NULL, + RET_PTR_TO_ALLOC_MEM_OR_NULL, +}; + + + + + +struct bpf_func_proto { + u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + bool gpl_only; + bool pkt_access; + enum bpf_return_type ret_type; + union { + struct { + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; + enum bpf_arg_type arg3_type; + enum bpf_arg_type arg4_type; + enum bpf_arg_type arg5_type; + }; + enum bpf_arg_type arg_type[5]; + }; + int *btf_id; +}; + + + + + +struct bpf_context; + +enum bpf_access_type { + BPF_READ = 1, + BPF_WRITE = 2 +}; +# 309 "./include/linux/bpf.h" +enum bpf_reg_type { + NOT_INIT = 0, + SCALAR_VALUE, + PTR_TO_CTX, + CONST_PTR_TO_MAP, + PTR_TO_MAP_VALUE, + PTR_TO_MAP_VALUE_OR_NULL, + PTR_TO_STACK, + PTR_TO_PACKET_META, + PTR_TO_PACKET, + PTR_TO_PACKET_END, + PTR_TO_FLOW_KEYS, + PTR_TO_SOCKET, + PTR_TO_SOCKET_OR_NULL, + PTR_TO_SOCK_COMMON, + PTR_TO_SOCK_COMMON_OR_NULL, + PTR_TO_TCP_SOCK, + PTR_TO_TCP_SOCK_OR_NULL, + PTR_TO_TP_BUFFER, + PTR_TO_XDP_SOCK, + PTR_TO_BTF_ID, + PTR_TO_BTF_ID_OR_NULL, + PTR_TO_MEM, + PTR_TO_MEM_OR_NULL, +}; + + + + +struct bpf_insn_access_aux { + enum bpf_reg_type reg_type; + union { + int ctx_field_size; + u32 btf_id; + }; + struct bpf_verifier_log *log; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) +{ + aux->ctx_field_size = size; +} + +struct bpf_prog_ops { + int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr *uattr); +}; + +struct bpf_verifier_ops { + + const struct bpf_func_proto * + (*get_func_proto)(enum bpf_func_id func_id, + const struct bpf_prog *prog); + + + + + bool (*is_valid_access)(int off, int size, enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info); + int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, + const struct bpf_prog *prog); + int (*gen_ld_abs)(const struct bpf_insn *orig, + struct bpf_insn *insn_buf); + u32 (*convert_ctx_access)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog, u32 *target_size); + int (*btf_struct_access)(struct bpf_verifier_log *log, + const struct btf_type *t, int off, int size, + enum bpf_access_type atype, + u32 *next_btf_id); +}; + +struct bpf_prog_offload_ops { + + int 
(*insn_hook)(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); + int (*finalize)(struct bpf_verifier_env *env); + + int (*replace_insn)(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn); + int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); + + int (*prepare)(struct bpf_prog *prog); + int (*translate)(struct bpf_prog *prog); + void (*destroy)(struct bpf_prog *prog); +}; + +struct bpf_prog_offload { + struct bpf_prog *prog; + struct net_device *netdev; + struct bpf_offload_dev *offdev; + void *dev_priv; + struct list_head offloads; + bool dev_state; + bool opt_failed; + void *jited_image; + u32 jited_len; +}; + +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED, + BPF_CGROUP_STORAGE_PERCPU, + __BPF_CGROUP_STORAGE_MAX +}; +# 424 "./include/linux/bpf.h" +struct bpf_prog_stats { + u64 cnt; + u64 nsecs; + struct u64_stats_sync syncp; +} __attribute__((__aligned__(2 * sizeof(u64)))); + +struct btf_func_model { + u8 ret_size; + u8 nr_args; + u8 arg_size[12]; +}; +# 455 "./include/linux/bpf.h" +struct bpf_tramp_progs { + struct bpf_prog *progs[40]; + int nr_progs; +}; +# 480 "./include/linux/bpf.h" +int arch_prepare_bpf_trampoline(void *image, void *image_end, + const struct btf_func_model *m, u32 flags, + struct bpf_tramp_progs *tprogs, + void *orig_call); + +u64 __attribute__((no_instrument_function)) __bpf_prog_enter(void); +void __attribute__((no_instrument_function)) __bpf_prog_exit(struct bpf_prog *prog, u64 start); + +struct bpf_ksym { + unsigned long start; + unsigned long end; + char name[128]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY, + BPF_TRAMP_FEXIT, + BPF_TRAMP_MODIFY_RETURN, + BPF_TRAMP_MAX, + BPF_TRAMP_REPLACE, +}; + +struct bpf_trampoline { + + struct hlist_node hlist; + + struct mutex mutex; + refcount_t refcnt; + u64 key; + struct { + struct btf_func_model model; + void *addr; + bool ftrace_managed; + } func; + + + + + struct bpf_prog *extension_prog; + + struct hlist_head progs_hlist[BPF_TRAMP_MAX]; + + int progs_cnt[BPF_TRAMP_MAX]; + + void *image; + u64 selector; + struct bpf_ksym ksym; +}; + + + +struct bpf_dispatcher_prog { + struct bpf_prog *prog; + refcount_t users; +}; + +struct bpf_dispatcher { + + struct mutex mutex; + void *func; + struct bpf_dispatcher_prog progs[48]; + int num_progs; + void *image; + u32 image_off; + struct bpf_ksym ksym; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) unsigned int bpf_dispatcher_nop_func( + const void *ctx, + const struct bpf_insn *insnsi, + unsigned int (*bpf_func)(const void *, + const struct bpf_insn *)) +{ + return bpf_func(ctx, insnsi); +} + +struct bpf_trampoline *bpf_trampoline_lookup(u64 key); +int bpf_trampoline_link_prog(struct bpf_prog *prog); +int bpf_trampoline_unlink_prog(struct bpf_prog *prog); +void bpf_trampoline_put(struct bpf_trampoline *tr); +# 597 "./include/linux/bpf.h" +void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, + struct bpf_prog *to); + +void *bpf_jit_alloc_exec_page(void); +void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); +void bpf_image_ksym_del(struct bpf_ksym *ksym); +void bpf_ksym_add(struct bpf_ksym *ksym); +void bpf_ksym_del(struct bpf_ksym *ksym); +# 632 "./include/linux/bpf.h" +struct bpf_func_info_aux { + u16 linkage; + bool unreliable; +}; + +enum bpf_jit_poke_reason { + BPF_POKE_REASON_TAIL_CALL, +}; + + 
+struct bpf_jit_poke_descriptor { + void *ip; + union { + struct { + struct bpf_map *map; + u32 key; + } tail_call; + }; + bool ip_stable; + u8 adj_off; + u16 reason; +}; + + +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; +}; + +struct bpf_prog_aux { + atomic64_t refcnt; + u32 used_map_cnt; + u32 max_ctx_offset; + u32 max_pkt_offset; + u32 max_tp_access; + u32 stack_depth; + u32 id; + u32 func_cnt; + u32 func_idx; + u32 attach_btf_id; + u32 ctx_arg_info_size; + const struct bpf_ctx_arg_aux *ctx_arg_info; + struct bpf_prog *linked_prog; + bool verifier_zext; + bool offload_requested; + bool attach_btf_trace; + bool func_proto_unreliable; + enum bpf_tramp_prog_type trampoline_prog_type; + struct bpf_trampoline *trampoline; + struct hlist_node tramp_hlist; + + const struct btf_type *attach_func_proto; + + const char *attach_func_name; + struct bpf_prog **func; + void *jit_data; + struct bpf_jit_poke_descriptor *poke_tab; + u32 size_poke_tab; + struct bpf_ksym ksym; + const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct bpf_prog *prog; + struct user_struct *user; + u64 load_time; + struct bpf_map *cgroup_storage[__BPF_CGROUP_STORAGE_MAX]; + char name[16U]; + + void *security; + + struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; + + + + + + + struct bpf_line_info *linfo; + + + + + + + + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + + + + + u32 linfo_idx; + u32 num_exentries; + struct exception_table_entry *extable; + struct bpf_prog_stats *stats; + union { + struct work_struct work; + struct callback_head rcu; + }; +}; + +struct bpf_array_aux { + + + + + + enum bpf_prog_type type; + bool jited; + + struct list_head poke_progs; + struct bpf_map *map; + struct mutex poke_mutex; + struct work_struct work; +}; + +struct bpf_struct_ops_value; +struct btf_type; +struct btf_member; + + +struct bpf_struct_ops { + const struct bpf_verifier_ops *verifier_ops; + int (*init)(struct btf *btf); + int (*check_member)(const struct btf_type *t, + const struct btf_member *member); + int (*init_member)(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata); + int (*reg)(void *kdata); + void (*unreg)(void *kdata); + const struct btf_type *type; + const struct btf_type *value_type; + const char *name; + struct btf_func_model func_models[64]; + u32 type_id; + u32 value_id; +}; + + + +const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); +void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); +bool bpf_struct_ops_get(const void *kdata); +void bpf_struct_ops_put(const void *kdata); +int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, + void *value); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_try_module_get(const void *data, struct module *owner) +{ + if (owner == ((void *)((0xeB9FUL << 2) + (0xdead000000000000UL)))) + return bpf_struct_ops_get(data); + else + return try_module_get(owner); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_module_put(const void *data, struct module *owner) +{ + if (owner == ((void *)((0xeB9FUL << 2) + (0xdead000000000000UL)))) + bpf_struct_ops_put(data); + else + module_put(owner); +} +# 821 "./include/linux/bpf.h" +struct bpf_array { + struct bpf_map map; + u32 elem_size; + u32 index_mask; + struct bpf_array_aux 
*aux; + union { + char value[0] __attribute__((__aligned__(8))); + void *ptrs[0] __attribute__((__aligned__(8))); + void *pptrs[0] __attribute__((__aligned__(8))); + }; +}; +# 844 "./include/linux/bpf.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 bpf_map_flags_to_cap(struct bpf_map *map) +{ + u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); + + + + + if (access_flags & BPF_F_RDONLY_PROG) + return ((((1UL))) << (0)); + else if (access_flags & BPF_F_WRONLY_PROG) + return ((((1UL))) << (1)); + else + return ((((1UL))) << (0)) | ((((1UL))) << (1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_map_flags_access_ok(u32 access_flags) +{ + return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != + (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); +} + +struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct callback_head rcu; +}; + +bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); +int bpf_prog_calc_tag(struct bpf_prog *fp); +const char *kernel_type_name(u32 btf_type_id); + +const struct bpf_func_proto *bpf_get_trace_printk_proto(void); + +typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, + unsigned long off, unsigned long len); +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog, + u32 *target_size); + +u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, + void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); +# 901 "./include/linux/bpf.h" +struct bpf_prog_array_item { + struct bpf_prog *prog; + struct bpf_cgroup_storage *cgroup_storage[__BPF_CGROUP_STORAGE_MAX]; +}; + +struct bpf_prog_array { + struct callback_head rcu; + struct bpf_prog_array_item items[]; +}; + +struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); +void bpf_prog_array_free(struct bpf_prog_array *progs); +int bpf_prog_array_length(struct bpf_prog_array *progs); +bool bpf_prog_array_is_empty(struct bpf_prog_array *array); +int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, + __u32 *prog_ids, u32 cnt); + +void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, + struct bpf_prog *old_prog); +int bpf_prog_array_copy_info(struct bpf_prog_array *array, + u32 *prog_ids, u32 request_cnt, + u32 *prog_cnt); +int bpf_prog_array_copy(struct bpf_prog_array *old_array, + struct bpf_prog *exclude_prog, + struct bpf_prog *include_prog, + struct bpf_prog_array **new_array); +# 1008 "./include/linux/bpf.h" +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_bpf_prog_active; extern __attribute__((section(".data..percpu" ""))) __typeof__(int) bpf_prog_active; +extern struct mutex bpf_stats_enabled_mutex; +# 1023 "./include/linux/bpf.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_disable_instrumentation(void) +{ + migrate_disable(); + if (0) + do { do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(bpf_prog_active)) { case 1: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); + else + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(bpf_prog_active)) { case 1: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_enable_instrumentation(void) +{ + if (0) + do { do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(bpf_prog_active)) { case 1: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(bpf_prog_active))(1)) && ((-(typeof(bpf_prog_active))(1)) == 1 || (-(typeof(bpf_prog_active))(1)) == -1)) ? 
(int)(-(typeof(bpf_prog_active))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(bpf_prog_active))(1)); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(bpf_prog_active))(1)) && ((-(typeof(bpf_prog_active))(1)) == 1 || (-(typeof(bpf_prog_active))(1)) == -1)) ? (int)(-(typeof(bpf_prog_active))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(bpf_prog_active))(1)); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(bpf_prog_active))(1)) && ((-(typeof(bpf_prog_active))(1)) == 1 || (-(typeof(bpf_prog_active))(1)) == -1)) ? 
(int)(-(typeof(bpf_prog_active))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(bpf_prog_active))(1)); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(bpf_prog_active))(1)) && ((-(typeof(bpf_prog_active))(1)) == 1 || (-(typeof(bpf_prog_active))(1)) == -1)) ? (int)(-(typeof(bpf_prog_active))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(bpf_prog_active))(1)); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); + else + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(bpf_prog_active)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(bpf_prog_active)) { case 1: do { 
typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(bpf_prog_active))(1)) && ((-(typeof(bpf_prog_active))(1)) == 1 || (-(typeof(bpf_prog_active))(1)) == -1)) ? (int)(-(typeof(bpf_prog_active))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(bpf_prog_active))(1)); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(bpf_prog_active))(1)) && ((-(typeof(bpf_prog_active))(1)) == 1 || (-(typeof(bpf_prog_active))(1)) == -1)) ? (int)(-(typeof(bpf_prog_active))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(bpf_prog_active))(1)); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(bpf_prog_active))(1)) && ((-(typeof(bpf_prog_active))(1)) == 1 || (-(typeof(bpf_prog_active))(1)) == -1)) ? 
(int)(-(typeof(bpf_prog_active))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(bpf_prog_active))(1)); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((bpf_prog_active)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(bpf_prog_active))(1)) && ((-(typeof(bpf_prog_active))(1)) == 1 || (-(typeof(bpf_prog_active))(1)) == -1)) ? (int)(-(typeof(bpf_prog_active))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(bpf_prog_active))(1)); (void)pao_tmp__; } switch (sizeof((bpf_prog_active))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "qi" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "ri" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((bpf_prog_active)) : "re" ((pao_T__)(-(typeof(bpf_prog_active))(1)))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); + migrate_enable(); +} + +extern const struct file_operations bpf_map_fops; +extern const struct file_operations bpf_prog_fops; +extern const struct file_operations bpf_iter_fops; + + + + + + + +# 1 "./include/linux/bpf_types.h" 1 + + + + +extern const struct bpf_prog_ops sk_filter_prog_ops; extern const struct bpf_verifier_ops sk_filter_verifier_ops; + +extern const struct bpf_prog_ops tc_cls_act_prog_ops; extern const 
struct bpf_verifier_ops tc_cls_act_verifier_ops; + +extern const struct bpf_prog_ops tc_cls_act_prog_ops; extern const struct bpf_verifier_ops tc_cls_act_verifier_ops; + +extern const struct bpf_prog_ops xdp_prog_ops; extern const struct bpf_verifier_ops xdp_verifier_ops; + + +extern const struct bpf_prog_ops cg_skb_prog_ops; extern const struct bpf_verifier_ops cg_skb_verifier_ops; + +extern const struct bpf_prog_ops cg_sock_prog_ops; extern const struct bpf_verifier_ops cg_sock_verifier_ops; + +extern const struct bpf_prog_ops cg_sock_addr_prog_ops; extern const struct bpf_verifier_ops cg_sock_addr_verifier_ops; + + +extern const struct bpf_prog_ops lwt_in_prog_ops; extern const struct bpf_verifier_ops lwt_in_verifier_ops; + +extern const struct bpf_prog_ops lwt_out_prog_ops; extern const struct bpf_verifier_ops lwt_out_verifier_ops; + +extern const struct bpf_prog_ops lwt_xmit_prog_ops; extern const struct bpf_verifier_ops lwt_xmit_verifier_ops; + +extern const struct bpf_prog_ops lwt_seg6local_prog_ops; extern const struct bpf_verifier_ops lwt_seg6local_verifier_ops; + +extern const struct bpf_prog_ops sock_ops_prog_ops; extern const struct bpf_verifier_ops sock_ops_verifier_ops; + +extern const struct bpf_prog_ops sk_skb_prog_ops; extern const struct bpf_verifier_ops sk_skb_verifier_ops; + +extern const struct bpf_prog_ops sk_msg_prog_ops; extern const struct bpf_verifier_ops sk_msg_verifier_ops; + +extern const struct bpf_prog_ops flow_dissector_prog_ops; extern const struct bpf_verifier_ops flow_dissector_verifier_ops; + + + +extern const struct bpf_prog_ops kprobe_prog_ops; extern const struct bpf_verifier_ops kprobe_verifier_ops; + +extern const struct bpf_prog_ops tracepoint_prog_ops; extern const struct bpf_verifier_ops tracepoint_verifier_ops; + +extern const struct bpf_prog_ops perf_event_prog_ops; extern const struct bpf_verifier_ops perf_event_verifier_ops; + +extern const struct bpf_prog_ops raw_tracepoint_prog_ops; extern const struct bpf_verifier_ops raw_tracepoint_verifier_ops; + +extern const struct bpf_prog_ops raw_tracepoint_writable_prog_ops; extern const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops; + +extern const struct bpf_prog_ops tracing_prog_ops; extern const struct bpf_verifier_ops tracing_verifier_ops; + + + +extern const struct bpf_prog_ops cg_dev_prog_ops; extern const struct bpf_verifier_ops cg_dev_verifier_ops; + +extern const struct bpf_prog_ops cg_sysctl_prog_ops; extern const struct bpf_verifier_ops cg_sysctl_verifier_ops; + +extern const struct bpf_prog_ops cg_sockopt_prog_ops; extern const struct bpf_verifier_ops cg_sockopt_verifier_ops; + + + +extern const struct bpf_prog_ops lirc_mode2_prog_ops; extern const struct bpf_verifier_ops lirc_mode2_verifier_ops; + + + +extern const struct bpf_prog_ops sk_reuseport_prog_ops; extern const struct bpf_verifier_ops sk_reuseport_verifier_ops; + + + +extern const struct bpf_prog_ops bpf_struct_ops_prog_ops; extern const struct bpf_verifier_ops bpf_struct_ops_verifier_ops; + +extern const struct bpf_prog_ops bpf_extension_prog_ops; extern const struct bpf_verifier_ops bpf_extension_verifier_ops; + + +extern const struct bpf_prog_ops lsm_prog_ops; extern const struct bpf_verifier_ops lsm_verifier_ops; + + + + +extern const struct bpf_map_ops array_map_ops; +extern const struct bpf_map_ops percpu_array_map_ops; +extern const struct bpf_map_ops prog_array_map_ops; +extern const struct bpf_map_ops perf_event_array_map_ops; + +extern const struct bpf_map_ops cgroup_array_map_ops; + + +extern const 
struct bpf_map_ops cgroup_storage_map_ops; +extern const struct bpf_map_ops cgroup_storage_map_ops; + +extern const struct bpf_map_ops htab_map_ops; +extern const struct bpf_map_ops htab_percpu_map_ops; +extern const struct bpf_map_ops htab_lru_map_ops; +extern const struct bpf_map_ops htab_lru_percpu_map_ops; +extern const struct bpf_map_ops trie_map_ops; + +extern const struct bpf_map_ops stack_trace_map_ops; + +extern const struct bpf_map_ops array_of_maps_map_ops; +extern const struct bpf_map_ops htab_of_maps_map_ops; + +extern const struct bpf_map_ops dev_map_ops; +extern const struct bpf_map_ops dev_map_hash_ops; +extern const struct bpf_map_ops sk_storage_map_ops; + +extern const struct bpf_map_ops sock_map_ops; +extern const struct bpf_map_ops sock_hash_ops; + +extern const struct bpf_map_ops cpu_map_ops; + +extern const struct bpf_map_ops xsk_map_ops; + + +extern const struct bpf_map_ops reuseport_array_ops; + + +extern const struct bpf_map_ops queue_map_ops; +extern const struct bpf_map_ops stack_map_ops; + +extern const struct bpf_map_ops bpf_struct_ops_map_ops; + +extern const struct bpf_map_ops ringbuf_map_ops; + + + + + + + + + +# 1052 "./include/linux/bpf.h" 2 + + + + +extern const struct bpf_prog_ops bpf_offload_prog_ops; +extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; +extern const struct bpf_verifier_ops xdp_analyzer_ops; + +struct bpf_prog *bpf_prog_get(u32 ufd); +struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, + bool attach_drv); +void bpf_prog_add(struct bpf_prog *prog, int i); +void bpf_prog_sub(struct bpf_prog *prog, int i); +void bpf_prog_inc(struct bpf_prog *prog); +struct bpf_prog * __attribute__((__warn_unused_result__)) bpf_prog_inc_not_zero(struct bpf_prog *prog); +void bpf_prog_put(struct bpf_prog *prog); +int __bpf_prog_charge(struct user_struct *user, u32 pages); +void __bpf_prog_uncharge(struct user_struct *user, u32 pages); +void __bpf_free_used_maps(struct bpf_prog_aux *aux, + struct bpf_map **used_maps, u32 len); + +void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); +void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); + +struct bpf_map *bpf_map_get(u32 ufd); +struct bpf_map *bpf_map_get_with_uref(u32 ufd); +struct bpf_map *__bpf_map_get(struct fd f); +void bpf_map_inc(struct bpf_map *map); +void bpf_map_inc_with_uref(struct bpf_map *map); +struct bpf_map * __attribute__((__warn_unused_result__)) bpf_map_inc_not_zero(struct bpf_map *map); +void bpf_map_put_with_uref(struct bpf_map *map); +void bpf_map_put(struct bpf_map *map); +int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); +void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); +int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size); +void bpf_map_charge_finish(struct bpf_map_memory *mem); +void bpf_map_charge_move(struct bpf_map_memory *dst, + struct bpf_map_memory *src); +void *bpf_map_area_alloc(u64 size, int numa_node); +void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); +void bpf_map_area_free(void *base); +void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); +int generic_map_lookup_batch(struct bpf_map *map, + const union bpf_attr *attr, + union bpf_attr *uattr); +int generic_map_update_batch(struct bpf_map *map, + const union bpf_attr *attr, + union bpf_attr *uattr); +int generic_map_delete_batch(struct bpf_map *map, + const union bpf_attr *attr, + union bpf_attr *uattr); +struct bpf_map *bpf_map_get_curr_or_next(u32 *id); + +extern int sysctl_unprivileged_bpf_disabled; + +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_allow_ptr_leaks(void) +{ + return perfmon_capable(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_bypass_spec_v1(void) +{ + return perfmon_capable(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_bypass_spec_v4(void) +{ + return perfmon_capable(); +} + +int bpf_map_new_fd(struct bpf_map *map, int flags); +int bpf_prog_new_fd(struct bpf_prog *prog); + +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *link); + void (*dealloc)(struct bpf_link *link); + int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog); + void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); + int (*fill_link_info)(const struct bpf_link *link, + struct bpf_link_info *info); +}; + +void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog); +int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); +int bpf_link_settle(struct bpf_link_primer *primer); +void bpf_link_cleanup(struct bpf_link_primer *primer); +void bpf_link_inc(struct bpf_link *link); +void bpf_link_put(struct bpf_link *link); +int bpf_link_new_fd(struct bpf_link *link); +struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); +struct bpf_link *bpf_link_get_from_fd(u32 ufd); + +int bpf_obj_pin_user(u32 ufd, const char *pathname); +int bpf_obj_get_user(const char *pathname, int flags); + + + + + + +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data); +typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); + + +struct bpf_iter_reg { + const char *target; + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; + u32 ctx_arg_info_size; + struct bpf_ctx_arg_aux ctx_arg_info[2]; +}; + +struct bpf_iter_meta { + union { struct seq_file * seq; __u64 :64; } __attribute__((aligned(8))); + u64 session_id; + u64 seq_num; +}; + +int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); +void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); +bool bpf_iter_prog_supported(struct bpf_prog *prog); +int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int bpf_iter_new_fd(struct bpf_link *link); +bool bpf_link_is_iter(struct bpf_link *link); +struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); +int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); + +int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, + u64 flags); +int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, + u64 flags); + +int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); + +int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); +int bpf_fd_array_map_lookup_elem(struct bpf_map 
*map, void *key, u32 *value); +int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); +int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); + +int bpf_get_file_flag(int flags); +int bpf_check_uarg_tail_zero(void *uaddr, size_t expected_size, + size_t actual_size); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_long_memcpy(void *dst, const void *src, u32 size) +{ + const long *lsrc = src; + long *ldst = dst; + + size /= sizeof(long); + while (size--) + *ldst++ = *lsrc++; +} + + +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, + union bpf_attr *uattr); +void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); + + +struct xdp_buff; +struct sk_buff; + +struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); +struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); +void __dev_flush(void); +int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, + struct net_device *dev_rx); +int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, + struct net_device *dev_rx); +int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, + struct bpf_prog *xdp_prog); +bool dev_map_can_have_prog(struct bpf_map *map); + +struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); +void __cpu_map_flush(void); +int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, + struct net_device *dev_rx); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bpf_map_attr_numa_node(const union bpf_attr *attr) +{ + return (attr->map_flags & BPF_F_NUMA_NODE) ? 
+ attr->numa_node : (-1); +} + +struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); +int array_map_alloc_check(union bpf_attr *attr); + +int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr *uattr); +int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, + union bpf_attr *uattr); +int bpf_prog_test_run_tracing(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr *uattr); +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr *uattr); +bool btf_ctx_access(int off, int size, enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info); +int btf_struct_access(struct bpf_verifier_log *log, + const struct btf_type *t, int off, int size, + enum bpf_access_type atype, + u32 *next_btf_id); +int btf_resolve_helper_id(struct bpf_verifier_log *log, + const struct bpf_func_proto *fn, int); + +int btf_distill_func_proto(struct bpf_verifier_log *log, + struct btf *btf, + const struct btf_type *func_proto, + const char *func_name, + struct btf_func_model *m); + +struct bpf_reg_state; +int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *regs); +int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *reg); +int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog, + struct btf *btf, const struct btf_type *t); + +struct bpf_prog *bpf_prog_by_id(u32 id); + +const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); +# 1469 "./include/linux/bpf.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bpf_prog *bpf_prog_get_type(u32 ufd, + enum bpf_prog_type type) +{ + return bpf_prog_get_type_dev(ufd, type, false); +} + +bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); + +int bpf_prog_offload_compile(struct bpf_prog *prog); +void bpf_prog_offload_destroy(struct bpf_prog *prog); +int bpf_prog_offload_info_fill(struct bpf_prog_info *info, + struct bpf_prog *prog); + +int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); + +int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); +int bpf_map_offload_update_elem(struct bpf_map *map, + void *key, void *value, u64 flags); +int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); +int bpf_map_offload_get_next_key(struct bpf_map *map, + void *key, void *next_key); + +bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); + +struct bpf_offload_dev * +bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); +void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); +void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); +int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, + struct net_device *netdev); +void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, + struct net_device *netdev); +bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); + + +int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) +{ + return aux->offload_requested; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return __builtin_expect(!!(map->ops == &bpf_map_offload_ops), 0); +} + +struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); +void bpf_map_offload_map_free(struct bpf_map *map); +# 1546 "./include/linux/bpf.h" +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +void sock_map_unhash(struct sock *sk); +void sock_map_close(struct sock *sk, long timeout); +# 1565 "./include/linux/bpf.h" +void bpf_sk_reuseport_detach(struct sock *sk); +int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value); +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags); +# 1592 "./include/linux/bpf.h" +extern const struct bpf_func_proto bpf_map_lookup_elem_proto; +extern const struct bpf_func_proto bpf_map_update_elem_proto; +extern const struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_map_push_elem_proto; +extern const struct bpf_func_proto bpf_map_pop_elem_proto; +extern const struct bpf_func_proto bpf_map_peek_elem_proto; + +extern const struct bpf_func_proto bpf_get_prandom_u32_proto; +extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; +extern const struct bpf_func_proto bpf_get_numa_node_id_proto; +extern const struct bpf_func_proto bpf_tail_call_proto; +extern const struct bpf_func_proto bpf_ktime_get_ns_proto; +extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; +extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; +extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; +extern const struct bpf_func_proto bpf_get_current_comm_proto; +extern const struct bpf_func_proto bpf_get_stackid_proto; +extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_sock_map_update_proto; +extern const struct bpf_func_proto bpf_sock_hash_update_proto; +extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; +extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; +extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; +extern const struct bpf_func_proto bpf_msg_redirect_map_proto; +extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; +extern const struct bpf_func_proto bpf_sk_redirect_map_proto; +extern const struct bpf_func_proto bpf_spin_lock_proto; +extern const struct bpf_func_proto bpf_spin_unlock_proto; +extern const struct bpf_func_proto bpf_get_local_storage_proto; +extern const struct bpf_func_proto bpf_strtol_proto; +extern const struct bpf_func_proto bpf_strtoul_proto; +extern const struct bpf_func_proto bpf_tcp_sock_proto; +extern const struct bpf_func_proto bpf_jiffies64_proto; +extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; +extern const struct bpf_func_proto bpf_event_output_data_proto; +extern const struct bpf_func_proto bpf_ringbuf_output_proto; +extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; +extern const struct bpf_func_proto bpf_ringbuf_submit_proto; +extern const struct bpf_func_proto bpf_ringbuf_discard_proto; +extern const struct bpf_func_proto bpf_ringbuf_query_proto; + +const struct bpf_func_proto *bpf_tracing_func_proto( + enum bpf_func_id func_id, const struct bpf_prog *prog); + +const struct bpf_func_proto *tracing_prog_func_proto( + enum bpf_func_id func_id, const struct bpf_prog *prog); + + +void 
bpf_user_rnd_init_once(void); +u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + + +bool bpf_sock_common_is_valid_access(int off, int size, + enum bpf_access_type type, + struct bpf_insn_access_aux *info); +bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); +u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); +# 1679 "./include/linux/bpf.h" +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; +bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); + +u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); + +bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); + +u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); +# 1738 "./include/linux/bpf.h" +enum bpf_text_poke_type { + BPF_MOD_CALL, + BPF_MOD_JUMP, +}; + +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, + void *addr1, void *addr2); +# 6 "./include/linux/bpf-cgroup.h" 2 + + + + + + + +struct sock; +struct sockaddr; +struct cgroup; +struct sk_buff; +struct bpf_map; +struct bpf_prog; +struct bpf_sock_ops_kern; +struct bpf_cgroup_storage; +struct ctl_table; +struct ctl_table_header; + + + +extern struct static_key_false cgroup_bpf_enabled_key; + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_bpf_cgroup_storage[__BPF_CGROUP_STORAGE_MAX]; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct bpf_cgroup_storage*) bpf_cgroup_storage[__BPF_CGROUP_STORAGE_MAX] + ; + + + + +struct bpf_cgroup_storage_map; + +struct bpf_storage_buffer { + struct callback_head rcu; + char data[]; +}; + +struct bpf_cgroup_storage { + union { + struct bpf_storage_buffer *buf; + void *percpu_buf; + }; + struct bpf_cgroup_storage_map *map; + struct bpf_cgroup_storage_key key; + struct list_head list; + struct rb_node node; + struct callback_head rcu; +}; + +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + +struct bpf_prog_list { + struct list_head node; + struct bpf_prog *prog; + struct bpf_cgroup_link *link; + struct bpf_cgroup_storage *storage[__BPF_CGROUP_STORAGE_MAX]; +}; + +struct bpf_prog_array; + +struct cgroup_bpf { + + struct bpf_prog_array *effective[__MAX_BPF_ATTACH_TYPE]; + + + + + + + struct list_head progs[__MAX_BPF_ATTACH_TYPE]; + u32 flags[__MAX_BPF_ATTACH_TYPE]; + + + struct bpf_prog_array *inactive; + + + struct percpu_ref refcnt; + + + struct work_struct release_work; +}; + +int cgroup_bpf_inherit(struct cgroup *cgrp); +void cgroup_bpf_offline(struct cgroup *cgrp); + +int __cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, + enum bpf_attach_type type, u32 flags); +int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + struct bpf_cgroup_link *link, + enum bpf_attach_type type); +int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr 
*attr, + union bpf_attr *uattr); + + +int cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type, + u32 flags); +int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type); +int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, + union bpf_attr *uattr); + +int __cgroup_bpf_run_filter_skb(struct sock *sk, + struct sk_buff *skb, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sk(struct sock *sk, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, + struct sockaddr *uaddr, + enum bpf_attach_type type, + void *t_ctx); + +int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, + struct bpf_sock_ops_kern *sock_ops, + enum bpf_attach_type type); + +int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, + short access, enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, + struct ctl_table *table, int write, + void **buf, size_t *pcount, loff_t *ppos, + enum bpf_attach_type type); + +int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, + int *optname, char *optval, + int *optlen, char **kernel_optval); +int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, + int optname, char *optval, + int *optlen, int max_optlen, + int retval); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum bpf_cgroup_storage_type cgroup_storage_type( + struct bpf_map *map) +{ + if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) + return BPF_CGROUP_STORAGE_PERCPU; + + return BPF_CGROUP_STORAGE_SHARED; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_cgroup_storage_set(struct bpf_cgroup_storage + *storage[__BPF_CGROUP_STORAGE_MAX]) +{ + enum bpf_cgroup_storage_type stype; + + for (stype = 0; stype < __BPF_CGROUP_STORAGE_MAX; stype++) + do { do { const void *__vpp_verify = (typeof((&(bpf_cgroup_storage[stype])) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(bpf_cgroup_storage[stype])) { case 1: do { typedef typeof((bpf_cgroup_storage[stype])) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (storage[stype]); (void)pto_tmp__; } switch (sizeof((bpf_cgroup_storage[stype]))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "qi" ((pto_T__)(storage[stype]))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "ri" ((pto_T__)(storage[stype]))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "ri" ((pto_T__)(storage[stype]))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "re" ((pto_T__)(storage[stype]))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((bpf_cgroup_storage[stype])) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (storage[stype]); (void)pto_tmp__; } switch (sizeof((bpf_cgroup_storage[stype]))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "qi" ((pto_T__)(storage[stype]))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "ri" ((pto_T__)(storage[stype]))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : 
"+m" ((bpf_cgroup_storage[stype])) : "ri" ((pto_T__)(storage[stype]))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "re" ((pto_T__)(storage[stype]))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((bpf_cgroup_storage[stype])) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (storage[stype]); (void)pto_tmp__; } switch (sizeof((bpf_cgroup_storage[stype]))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "qi" ((pto_T__)(storage[stype]))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "ri" ((pto_T__)(storage[stype]))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "ri" ((pto_T__)(storage[stype]))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "re" ((pto_T__)(storage[stype]))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((bpf_cgroup_storage[stype])) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (storage[stype]); (void)pto_tmp__; } switch (sizeof((bpf_cgroup_storage[stype]))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "qi" ((pto_T__)(storage[stype]))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "ri" ((pto_T__)(storage[stype]))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "ri" ((pto_T__)(storage[stype]))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((bpf_cgroup_storage[stype])) : "re" ((pto_T__)(storage[stype]))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, + enum bpf_cgroup_storage_type stype); +void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); +void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, + struct cgroup *cgroup, + enum bpf_attach_type type); +void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); +int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map); +void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map); + +int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, + void *value, u64 flags); +# 340 "./include/linux/bpf-cgroup.h" +int cgroup_bpf_prog_attach(const union bpf_attr *attr, + enum bpf_prog_type ptype, struct bpf_prog *prog); +int cgroup_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype); +int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int cgroup_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr *uattr); +# 23 "./include/linux/cgroup-defs.h" 2 +# 1 "./include/linux/psi_types.h" 1 + + + +# 1 "./include/linux/kthread.h" 1 + + + + + + + +struct mm_struct; + +__attribute__((__format__(printf, 4, 5))) +struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), + void *data, + int node, + const char namefmt[], ...); +# 31 "./include/linux/kthread.h" +struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), + void *data, + unsigned int cpu, + const char *namefmt); +# 54 "./include/linux/kthread.h" +void 
free_kthread_struct(struct task_struct *k); +void kthread_bind(struct task_struct *k, unsigned int cpu); +void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); +int kthread_stop(struct task_struct *k); +bool kthread_should_stop(void); +bool kthread_should_park(void); +bool __kthread_should_park(struct task_struct *k); +bool kthread_freezable_should_stop(bool *was_frozen); +void *kthread_func(struct task_struct *k); +void *kthread_data(struct task_struct *k); +void *kthread_probe_data(struct task_struct *k); +int kthread_park(struct task_struct *k); +void kthread_unpark(struct task_struct *k); +void kthread_parkme(void); + +int kthreadd(void *unused); +extern struct task_struct *kthreadd_task; +extern int tsk_fork_get_node(struct task_struct *tsk); +# 81 "./include/linux/kthread.h" +struct kthread_work; +typedef void (*kthread_work_func_t)(struct kthread_work *work); +void kthread_delayed_work_timer_fn(struct timer_list *t); + +enum { + KTW_FREEZABLE = 1 << 0, +}; + +struct kthread_worker { + unsigned int flags; + raw_spinlock_t lock; + struct list_head work_list; + struct list_head delayed_work_list; + struct task_struct *task; + struct kthread_work *current_work; +}; + +struct kthread_work { + struct list_head node; + kthread_work_func_t func; + struct kthread_worker *worker; + + int canceling; +}; + +struct kthread_delayed_work { + struct kthread_work work; + struct timer_list timer; +}; +# 151 "./include/linux/kthread.h" +extern void __kthread_init_worker(struct kthread_worker *worker, + const char *name, struct lock_class_key *key); +# 175 "./include/linux/kthread.h" +int kthread_worker_fn(void *worker_ptr); + +__attribute__((__format__(printf, 2, 3))) +struct kthread_worker * +kthread_create_worker(unsigned int flags, const char namefmt[], ...); + +__attribute__((__format__(printf, 3, 4))) struct kthread_worker * +kthread_create_worker_on_cpu(int cpu, unsigned int flags, + const char namefmt[], ...); + +bool kthread_queue_work(struct kthread_worker *worker, + struct kthread_work *work); + +bool kthread_queue_delayed_work(struct kthread_worker *worker, + struct kthread_delayed_work *dwork, + unsigned long delay); + +bool kthread_mod_delayed_work(struct kthread_worker *worker, + struct kthread_delayed_work *dwork, + unsigned long delay); + +void kthread_flush_work(struct kthread_work *work); +void kthread_flush_worker(struct kthread_worker *worker); + +bool kthread_cancel_work_sync(struct kthread_work *work); +bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); + +void kthread_destroy_worker(struct kthread_worker *worker); + +void kthread_use_mm(struct mm_struct *mm); +void kthread_unuse_mm(struct mm_struct *mm); + +struct cgroup_subsys_state; + + +void kthread_associate_blkcg(struct cgroup_subsys_state *css); +struct cgroup_subsys_state *kthread_blkcg(void); +# 5 "./include/linux/psi_types.h" 2 +# 13 "./include/linux/psi_types.h" +enum psi_task_count { + NR_IOWAIT, + NR_MEMSTALL, + NR_RUNNING, + + + + + + + NR_ONCPU, + NR_PSI_TASK_COUNTS = 4, +}; +# 34 "./include/linux/psi_types.h" +enum psi_res { + PSI_IO, + PSI_MEM, + PSI_CPU, + NR_PSI_RESOURCES = 3, +}; + + + + + + + +enum psi_states { + PSI_IO_SOME, + PSI_IO_FULL, + PSI_MEM_SOME, + PSI_MEM_FULL, + PSI_CPU_SOME, + + PSI_NONIDLE, + NR_PSI_STATES = 6, +}; + +enum psi_aggregators { + PSI_AVGS = 0, + PSI_POLL, + NR_PSI_AGGREGATORS, +}; + +struct psi_group_cpu { + + + + seqcount_t seq __attribute__((__aligned__((1 << (6))))); + + + unsigned int tasks[NR_PSI_TASK_COUNTS]; + + + u32 state_mask; + 
+ + u32 times[NR_PSI_STATES]; + + + u64 state_start; + + + + + u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES] + __attribute__((__aligned__((1 << (6))))); +}; + + +struct psi_window { + + u64 size; + + + u64 start_time; + + + u64 start_value; + + + u64 prev_growth; +}; + +struct psi_trigger { + + enum psi_states state; + + + u64 threshold; + + + struct list_head node; + + + struct psi_group *group; + + + wait_queue_head_t event_wait; + + + int event; + + + struct psi_window win; + + + + + + u64 last_event_time; + + + struct kref refcount; +}; + +struct psi_group { + + struct mutex avgs_lock; + + + struct psi_group_cpu *pcpu; + + + u64 avg_total[NR_PSI_STATES - 1]; + u64 avg_last_update; + u64 avg_next_update; + + + struct delayed_work avgs_work; + + + u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1]; + unsigned long avg[NR_PSI_STATES - 1][3]; + + + atomic_t poll_scheduled; + struct kthread_worker *poll_kworker; + struct kthread_delayed_work poll_work; + + + struct mutex trigger_lock; + + + struct list_head triggers; + u32 nr_triggers[NR_PSI_STATES - 1]; + u32 poll_states; + u64 poll_min_period; + + + u64 polling_total[NR_PSI_STATES - 1]; + u64 polling_next_update; + u64 polling_until; +}; +# 24 "./include/linux/cgroup-defs.h" 2 + + + +struct cgroup; +struct cgroup_root; +struct cgroup_subsys; +struct cgroup_taskset; +struct kernfs_node; +struct kernfs_ops; +struct kernfs_open_file; +struct seq_file; +struct poll_table_struct; + + + + + + + +enum cgroup_subsys_id { +# 1 "./include/linux/cgroup_subsys.h" 1 +# 13 "./include/linux/cgroup_subsys.h" +cpuset_cgrp_id, + + + +cpu_cgrp_id, + + + +cpuacct_cgrp_id, + + + +io_cgrp_id, + + + +memory_cgrp_id, + + + +devices_cgrp_id, + + + +freezer_cgrp_id, + + + +net_cls_cgrp_id, + + + +perf_event_cgrp_id, + + + +net_prio_cgrp_id, + + + +hugetlb_cgrp_id, + + + +pids_cgrp_id, + + + +rdma_cgrp_id, + + + + + + +debug_cgrp_id, +# 45 "./include/linux/cgroup-defs.h" 2 + CGROUP_SUBSYS_COUNT, +}; + + + +enum { + CSS_NO_REF = (1 << 0), + CSS_ONLINE = (1 << 1), + CSS_RELEASED = (1 << 2), + CSS_VISIBLE = (1 << 3), + CSS_DYING = (1 << 4), +}; + + +enum { + + CGRP_NOTIFY_ON_RELEASE, + + + + + + CGRP_CPUSET_CLONE_CHILDREN, + + + CGRP_FREEZE, + + + CGRP_FROZEN, +}; + + +enum { + CGRP_ROOT_NOPREFIX = (1 << 1), + CGRP_ROOT_XATTR = (1 << 2), + + + + + + + CGRP_ROOT_NS_DELEGATE = (1 << 3), + + + + + CGRP_ROOT_CPUSET_V2_MODE = (1 << 4), + + + + + CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5), + + + + + CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6), +}; + + +enum { + CFTYPE_ONLY_ON_ROOT = (1 << 0), + CFTYPE_NOT_ON_ROOT = (1 << 1), + CFTYPE_NS_DELEGATABLE = (1 << 2), + + CFTYPE_NO_PREFIX = (1 << 3), + CFTYPE_WORLD_WRITABLE = (1 << 4), + CFTYPE_DEBUG = (1 << 5), + + + __CFTYPE_ONLY_ON_DFL = (1 << 16), + __CFTYPE_NOT_ON_DFL = (1 << 17), +}; + + + + + + +struct cgroup_file { + + struct kernfs_node *kn; + unsigned long notified_at; + struct timer_list notify_timer; +}; +# 138 "./include/linux/cgroup-defs.h" +struct cgroup_subsys_state { + + struct cgroup *cgroup; + + + struct cgroup_subsys *ss; + + + struct percpu_ref refcnt; + + + struct list_head sibling; + struct list_head children; + + + struct list_head rstat_css_node; + + + + + + int id; + + unsigned int flags; + + + + + + + + u64 serial_nr; + + + + + + atomic_t online_cnt; + + + struct work_struct destroy_work; + struct rcu_work destroy_rwork; + + + + + + struct cgroup_subsys_state *parent; +}; +# 195 "./include/linux/cgroup-defs.h" +struct css_set { + + + + + + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; + + + 
[Preprocessed kernel source elided: this part of the hunk is machine-generated `gcc -E` output for the analysed Linux translation unit and contains no hand-written changes. In order, it expands:

- `include/linux/cgroup-defs.h` — `struct css_set`, `cgroup_base_stat`, `cgroup_rstat_cpu`, `cgroup_freezer_state`, `cgroup`, `cgroup_root`, `cftype`, `cgroup_subsys`, `sock_cgroup_data`, and the `sock_cgroup_*` accessors;
- `include/linux/cgroup.h` (with `cgroup_subsys.h` pulled in twice, once for the per-subsystem `cgroup_subsys` externs and once for their static keys) — `struct css_task_iter`, the core cgroup API prototypes, the `css_*`/`cgroup_*` reference-count helpers, the `task_css*` accessors, descendant/ancestor walks, rstat and cputime accounting, the socket cgroup helpers, cgroup namespaces, and the freezer/BPF helpers;
- `include/linux/security.h` — `enum lockdown_reason`, the `cap_*` capability helpers, and the `security_*` LSM hook prototypes;
- `include/linux/perf_event.h` — `struct pmu`, `perf_event`, `perf_event_context`, and `perf_cpu_context`, with this excerpt breaking off inside `struct perf_output_handle`.

`READ_ONCE()`/`WRITE_ONCE()`, `rcu_dereference_check()`, and `container_of()` appear throughout in fully macro-expanded form.]
+ struct perf_buffer *rb; + unsigned long wakeup; + unsigned long size; + u64 aux_flags; + union { + void *addr; + unsigned long head; + }; + int page; +}; + +struct bpf_perf_event_data_kern { + bpf_user_pt_regs_t *regs; + struct perf_sample_data *data; + struct perf_event *event; +}; + + + + + + + +struct perf_cgroup_info { + u64 time; + u64 timestamp; +}; + +struct perf_cgroup { + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; +}; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct perf_cgroup * +perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) +{ + return ({ void *__mptr = (void *)(({ typeof(*(((task))->cgroups)) *________p1 = (typeof(*(((task))->cgroups)) *)({ do { extern void __compiletime_assert_1023(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((((task))->cgroups)) == sizeof(char) || sizeof((((task))->cgroups)) == sizeof(short) || sizeof((((task))->cgroups)) == sizeof(int) || sizeof((((task))->cgroups)) == sizeof(long)) || sizeof((((task))->cgroups)) == sizeof(long long))) __compiletime_assert_1023(); } while (0); ({ typeof( _Generic(((((task))->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((((task))->cgroups)))) __x = (*(const volatile typeof( _Generic(((((task))->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((((task))->cgroups)))) *)&((((task))->cgroups))); do { } while (0); (typeof((((task))->cgroups)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lock_is_held(&(&cgroup_mutex)->dep_map) || lock_is_held(&(&css_set_lock)->dep_map) || (((task))->flags & 0x00000004) || ((ctx ? 
lock_is_held(&(&ctx->lock)->dep_map) : true))) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/perf_event.h", 917, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(((task))->cgroups)) *)(________p1)); })->subsys[(perf_event_cgrp_id)]); do { extern void __compiletime_assert_1024(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(({ typeof(*(((task))->cgroups)) *________p1 = (typeof(*(((task))->cgroups)) *)({ do { extern void __compiletime_assert_1023(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((((task))->cgroups)) == sizeof(char) || sizeof((((task))->cgroups)) == sizeof(short) || sizeof((((task))->cgroups)) == sizeof(int) || sizeof((((task))->cgroups)) == sizeof(long)) || sizeof((((task))->cgroups)) == sizeof(long long))) __compiletime_assert_1023(); } while (0); ({ typeof( _Generic(((((task))->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((((task))->cgroups)))) __x = (*(const volatile typeof( _Generic(((((task))->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((((task))->cgroups)))) *)&((((task))->cgroups))); do { } while (0); (typeof((((task))->cgroups)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lock_is_held(&(&cgroup_mutex)->dep_map) || lock_is_held(&(&css_set_lock)->dep_map) || (((task))->flags & 0x00000004) || ((ctx ? 
lock_is_held(&(&ctx->lock)->dep_map) : true))) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/perf_event.h", 917, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(((task))->cgroups)) *)(________p1)); })->subsys[(perf_event_cgrp_id)])), typeof(((struct perf_cgroup *)0)->css)) && !__builtin_types_compatible_p(typeof(*(({ typeof(*(((task))->cgroups)) *________p1 = (typeof(*(((task))->cgroups)) *)({ do { extern void __compiletime_assert_1023(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((((task))->cgroups)) == sizeof(char) || sizeof((((task))->cgroups)) == sizeof(short) || sizeof((((task))->cgroups)) == sizeof(int) || sizeof((((task))->cgroups)) == sizeof(long)) || sizeof((((task))->cgroups)) == sizeof(long long))) __compiletime_assert_1023(); } while (0); ({ typeof( _Generic(((((task))->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((((task))->cgroups)))) __x = (*(const volatile typeof( _Generic(((((task))->cgroups)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((((task))->cgroups)))) *)&((((task))->cgroups))); do { } while (0); (typeof((((task))->cgroups)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lock_is_held(&(&cgroup_mutex)->dep_map) || lock_is_held(&(&css_set_lock)->dep_map) || (((task))->flags & 0x00000004) || ((ctx ? 
lock_is_held(&(&ctx->lock)->dep_map) : true))) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/perf_event.h", 917, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(((task))->cgroups)) *)(________p1)); })->subsys[(perf_event_cgrp_id)])), typeof(void))))) __compiletime_assert_1024(); } while (0); ((struct perf_cgroup *)(__mptr - __builtin_offsetof(struct perf_cgroup, css))); }) + + + ; +} + + + + +extern void *perf_aux_output_begin(struct perf_output_handle *handle, + struct perf_event *event); +extern void perf_aux_output_end(struct perf_output_handle *handle, + unsigned long size); +extern int perf_aux_output_skip(struct perf_output_handle *handle, + unsigned long size); +extern void *perf_get_aux(struct perf_output_handle *handle); +extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags); +extern void perf_event_itrace_started(struct perf_event *event); + +extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); +extern void perf_pmu_unregister(struct pmu *pmu); + +extern int perf_num_counters(void); +extern const char *perf_pmu_name(void); +extern void __perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task); +extern void __perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next); +extern int perf_event_init_task(struct task_struct *child); +extern void perf_event_exit_task(struct task_struct *child); +extern void perf_event_free_task(struct task_struct *task); +extern void perf_event_delayed_put(struct task_struct *task); +extern struct file *perf_event_get(unsigned int fd); +extern const struct perf_event *perf_get_event(struct file *file); +extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event); +extern void perf_event_print_debug(void); +extern void perf_pmu_disable(struct pmu *pmu); +extern void perf_pmu_enable(struct pmu *pmu); +extern void perf_sched_cb_dec(struct pmu *pmu); +extern void perf_sched_cb_inc(struct pmu *pmu); +extern int perf_event_task_disable(void); +extern int perf_event_task_enable(void); + +extern void perf_pmu_resched(struct pmu *pmu); + +extern int perf_event_refresh(struct perf_event *event, int refresh); +extern void perf_event_update_userpage(struct perf_event *event); +extern int perf_event_release_kernel(struct perf_event *event); +extern struct perf_event * +perf_event_create_kernel_counter(struct perf_event_attr *attr, + int cpu, + struct task_struct *task, + perf_overflow_handler_t callback, + void *context); +extern void perf_pmu_migrate_context(struct pmu *pmu, + int src_cpu, int dst_cpu); +int perf_event_read_local(struct perf_event *event, u64 *value, + u64 *enabled, u64 *running); +extern u64 perf_event_read_value(struct perf_event *event, + u64 *enabled, u64 *running); + + +struct perf_sample_data { + + + + + u64 addr; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 period; + u64 weight; + u64 txn; + union perf_mem_data_src data_src; + + + + + + u64 type; + u64 ip; + struct { + u32 pid; + u32 tid; + } tid_entry; + u64 time; + u64 id; + u64 stream_id; + struct { + u32 cpu; + u32 reserved; + } cpu_entry; + struct perf_callchain_entry *callchain; + u64 aux_size; + + + + + + struct perf_regs regs_user; + struct pt_regs regs_user_copy; + + struct perf_regs regs_intr; + u64 stack_user_size; + + u64 phys_addr; + u64 cgroup; +} __attribute__((__aligned__((1 << (6))))); +# 1033 "./include/linux/perf_event.h" +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_sample_data_init(struct perf_sample_data *data, + u64 addr, u64 period) +{ + + data->addr = addr; + data->raw = ((void *)0); + data->br_stack = ((void *)0); + data->period = period; + data->weight = 0; + data->data_src.val = ((((__u64)0x01) << 0) | (((__u64)0x01) << 5) | (((__u64)0x01) << 19) | (((__u64)0x01) << 24) | (((__u64)0x01) << 26)); + data->txn = 0; +} + +extern void perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event); +extern void perf_prepare_sample(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event, + struct pt_regs *regs); + +extern int perf_event_overflow(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); + +extern void perf_event_output_forward(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); +extern void perf_event_output_backward(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); +extern int perf_event_output(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +is_default_overflow_handler(struct perf_event *event) +{ + if (__builtin_expect(!!(event->overflow_handler == perf_event_output_forward), 1)) + return true; + if (__builtin_expect(!!(event->overflow_handler == perf_event_output_backward), 0)) + return true; + return false; +} + +extern void +perf_event_header__init_id(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event); +extern void +perf_event__output_id_sample(struct perf_event *event, + struct perf_output_handle *handle, + struct perf_sample_data *sample); + +extern void +perf_log_lost_samples(struct perf_event *event, u64 lost); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool event_has_any_exclude_flag(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + + return attr->exclude_idle || attr->exclude_user || + attr->exclude_kernel || attr->exclude_hv || + attr->exclude_guest || attr->exclude_host; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_sampling_event(struct perf_event *event) +{ + return event->attr.sample_period != 0; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_software_event(struct perf_event *event) +{ + return event->event_caps & ((((1UL))) << (0)); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int in_software_context(struct perf_event *event) +{ + return event->ctx->pmu->task_ctx_nr == perf_sw_context; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_exclusive_pmu(struct pmu *pmu) +{ + return pmu->capabilities & 0x10; +} + +extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; + +extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); +extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); +# 1149 "./include/linux/perf_event.h" +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_fetch_caller_regs(struct pt_regs *regs) +{ + { (regs)->ip = (((unsigned long)__builtin_return_address(0))); (regs)->sp = (unsigned long)__builtin_frame_address(0); (regs)->cs = (2*8); regs->flags = 0; }; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) +{ + if (static_key_false(&perf_swevent_enabled[event_id])) + __perf_sw_event(event_id, nr, regs, addr); +} + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope___perf_regs[4]; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs) __perf_regs[4]; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) +{ + if (static_key_false(&perf_swevent_enabled[event_id])) { + struct pt_regs *regs = ({ do { const void *__vpp_verify = (typeof((&__perf_regs[0]) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&__perf_regs[0])); (typeof(*(&__perf_regs[0])) *)tcp_ptr__; }); }); + + perf_fetch_caller_regs(regs); + ___perf_sw_event(event_id, nr, regs, addr); + } +} + +extern struct static_key_false perf_sched_events; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +perf_sw_migrate_enabled(void) +{ + if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) + return true; + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_event_task_migrate(struct task_struct *task) +{ + if (perf_sw_migrate_enabled()) + task->sched_migrated = 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_event_task_sched_in(struct task_struct *prev, + struct task_struct *task) +{ + if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&perf_sched_events), struct static_key_true)) branch = arch_static_branch_jump(&(&perf_sched_events)->key, false); else if (__builtin_types_compatible_p(typeof(*&perf_sched_events), struct static_key_false)) branch = arch_static_branch(&(&perf_sched_events)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); })) + __perf_event_task_sched_in(prev, task); + + if (perf_sw_migrate_enabled() && task->sched_migrated) { + struct pt_regs *regs = ({ do { const void *__vpp_verify = (typeof((&__perf_regs[0]) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&__perf_regs[0])); (typeof(*(&__perf_regs[0])) *)tcp_ptr__; }); }); + + perf_fetch_caller_regs(regs); + ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); + task->sched_migrated = 0; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_event_task_sched_out(struct task_struct *prev, + struct task_struct *next) +{ + perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); + + if (({ bool 
branch; if (__builtin_types_compatible_p(typeof(*&perf_sched_events), struct static_key_true)) branch = arch_static_branch_jump(&(&perf_sched_events)->key, false); else if (__builtin_types_compatible_p(typeof(*&perf_sched_events), struct static_key_false)) branch = arch_static_branch(&(&perf_sched_events)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); })) + __perf_event_task_sched_out(prev, next); +} + +extern void perf_event_mmap(struct vm_area_struct *vma); + +extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, + bool unregister, const char *sym); +extern void perf_event_bpf_event(struct bpf_prog *prog, + enum perf_bpf_event_type type, + u16 flags); + +extern struct perf_guest_info_callbacks *perf_guest_cbs; +extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); +extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); + +extern void perf_event_exec(void); +extern void perf_event_comm(struct task_struct *tsk, bool exec); +extern void perf_event_namespaces(struct task_struct *tsk); +extern void perf_event_fork(struct task_struct *tsk); + + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_perf_callchain_entry; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct perf_callchain_entry) perf_callchain_entry; + +extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); +extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); +extern struct perf_callchain_entry * +get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, + u32 max_stack, bool crosstask, bool add_mark); +extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); +extern int get_callchain_buffers(int max_stack); +extern void put_callchain_buffers(void); + +extern int sysctl_perf_event_max_stack; +extern int sysctl_perf_event_max_contexts_per_stack; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) +{ + if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { + struct perf_callchain_entry *entry = ctx->entry; + entry->ip[entry->nr++] = ip; + ++ctx->contexts; + return 0; + } else { + ctx->contexts_maxed = true; + return -1; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) +{ + if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { + struct perf_callchain_entry *entry = ctx->entry; + entry->ip[entry->nr++] = ip; + ++ctx->nr; + return 0; + } else { + return -1; + } +} + +extern int sysctl_perf_event_paranoid; +extern int sysctl_perf_event_mlock; +extern int sysctl_perf_event_sample_rate; +extern int sysctl_perf_cpu_time_max_percent; + +extern void perf_sample_event_took(u64 sample_len_ns); + +int perf_proc_update_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int perf_event_max_stack_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +# 1298 "./include/linux/perf_event.h" +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) int perf_is_paranoid(void) +{ + return sysctl_perf_event_paranoid > -1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int perf_allow_kernel(struct perf_event_attr *attr) +{ + if (sysctl_perf_event_paranoid > 1 && !perfmon_capable()) + return -13; + + return security_perf_event_open(attr, 2); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int perf_allow_cpu(struct perf_event_attr *attr) +{ + if (sysctl_perf_event_paranoid > 0 && !perfmon_capable()) + return -13; + + return security_perf_event_open(attr, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int perf_allow_tracepoint(struct perf_event_attr *attr) +{ + if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) + return -1; + + return security_perf_event_open(attr, 3); +} + +extern void perf_event_init(void); +extern void perf_tp_event(u16 event_type, u64 count, void *record, + int entry_size, struct pt_regs *regs, + struct hlist_head *head, int rctx, + struct task_struct *task); +extern void perf_bp_event(struct perf_event *event, void *data); +# 1343 "./include/linux/perf_event.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool has_branch_stack(struct perf_event *event) +{ + return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool needs_branch_stack(struct perf_event *event) +{ + return event->attr.branch_sample_type != 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool has_aux(struct perf_event *event) +{ + return event->pmu->setup_aux; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_write_backward(struct perf_event *event) +{ + return !!event->attr.write_backward; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool has_addr_filter(struct perf_event *event) +{ + return event->pmu->nr_addr_filters; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct perf_addr_filters_head * +perf_event_addr_filters(struct perf_event *event) +{ + struct perf_addr_filters_head *ifh = &event->addr_filters; + + if (event->parent) + ifh = &event->parent->addr_filters; + + return ifh; +} + +extern void perf_event_addr_filters_sync(struct perf_event *event); + +extern int perf_output_begin(struct perf_output_handle *handle, + struct perf_event *event, unsigned int size); +extern int perf_output_begin_forward(struct perf_output_handle *handle, + struct perf_event *event, + unsigned int size); +extern int perf_output_begin_backward(struct perf_output_handle *handle, + struct perf_event *event, + unsigned int size); + +extern void perf_output_end(struct perf_output_handle *handle); +extern unsigned int perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len); +extern unsigned int perf_output_skip(struct perf_output_handle *handle, + unsigned int len); +extern long perf_output_copy_aux(struct perf_output_handle *aux_handle, + struct 
perf_output_handle *handle, + unsigned long from, unsigned long to); +extern int perf_swevent_get_recursion_context(void); +extern void perf_swevent_put_recursion_context(int rctx); +extern u64 perf_swevent_set_period(struct perf_event *event); +extern void perf_event_enable(struct perf_event *event); +extern void perf_event_disable(struct perf_event *event); +extern void perf_event_disable_local(struct perf_event *event); +extern void perf_event_disable_inatomic(struct perf_event *event); +extern void perf_event_task_tick(void); +extern int perf_event_account_interrupt(struct perf_event *event); +extern int perf_event_period(struct perf_event *event, u64 value); +extern u64 perf_event_pause(struct perf_event *event, bool reset); +# 1502 "./include/linux/perf_event.h" +extern void perf_restore_debug_store(void); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool perf_raw_frag_last(const struct perf_raw_frag *frag) +{ + return frag->pad < sizeof(u64); +} + + + +struct perf_pmu_events_attr { + struct device_attribute attr; + u64 id; + const char *event_str; +}; + +struct perf_pmu_events_ht_attr { + struct device_attribute attr; + u64 id; + const char *event_str_ht; + const char *event_str_noht; +}; + +ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, + char *page); +# 1557 "./include/linux/perf_event.h" +int perf_event_init_cpu(unsigned int cpu); +int perf_event_exit_cpu(unsigned int cpu); + + + + + +extern void __attribute__((__weak__)) arch_perf_update_userpage(struct perf_event *event, + struct perf_event_mmap_page *userpg, + u64 now); +# 11 "./include/linux/trace_events.h" 2 +# 1 "./include/linux/tracepoint.h" 1 +# 12 "./include/linux/trace_events.h" 2 + +struct trace_array; +struct array_buffer; +struct tracer; +struct dentry; +struct bpf_prog; + +const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, + unsigned long flags, + const struct trace_print_flags *flag_array); + +const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val, + const struct trace_print_flags *symbol_array); +# 37 "./include/linux/trace_events.h" +const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, + unsigned int bitmask_size); + +const char *trace_print_hex_seq(struct trace_seq *p, + const unsigned char *buf, int len, + bool concatenate); + +const char *trace_print_array_seq(struct trace_seq *p, + const void *buf, int count, + size_t el_size); + +const char * +trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); + +struct trace_iterator; +struct trace_event; + +int trace_raw_output_prep(struct trace_iterator *iter, + struct trace_event *event); + + + + + + + +struct trace_entry { + unsigned short type; + unsigned char flags; + unsigned char preempt_count; + int pid; +}; +# 79 "./include/linux/trace_events.h" +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + struct array_buffer *array_buffer; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter **buffer_iter; + unsigned long iter_flags; + void *temp; + unsigned int temp_size; + + + struct trace_seq tmp_seq; + + cpumask_var_t started; + + + bool snapshot; + + + struct trace_seq seq; + struct trace_entry *ent; + unsigned long lost_events; + int leftover; + int ent_size; + int cpu; + u64 ts; + + loff_t pos; + 
long idx; + + +}; + +enum trace_iter_flags { + TRACE_FILE_LAT_FMT = 1, + TRACE_FILE_ANNOTATE = 2, + TRACE_FILE_TIME_IN_NS = 4, +}; + + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, + int flags, struct trace_event *event); + +struct trace_event_functions { + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +struct trace_event { + struct hlist_node node; + struct list_head list; + int type; + struct trace_event_functions *funcs; +}; + +extern int register_trace_event(struct trace_event *event); +extern int unregister_trace_event(struct trace_event *event); + + +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, + TRACE_TYPE_NO_CONSUME = 3 +}; + +enum print_line_t trace_handle_return(struct trace_seq *s); + +void tracing_generic_entry_update(struct trace_entry *entry, + unsigned short type, + unsigned long flags, + int pc); +struct trace_event_file; + +struct ring_buffer_event * +trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer, + struct trace_event_file *trace_file, + int type, unsigned long len, + unsigned long flags, int pc); + + + + +void tracing_record_taskinfo(struct task_struct *task, int flags); +void tracing_record_taskinfo_sched_switch(struct task_struct *prev, + struct task_struct *next, int flags); + +void tracing_record_cmdline(struct task_struct *task); +void tracing_record_tgid(struct task_struct *task); + +int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...); + +struct event_filter; + +enum trace_reg { + TRACE_REG_REGISTER, + TRACE_REG_UNREGISTER, + + TRACE_REG_PERF_REGISTER, + TRACE_REG_PERF_UNREGISTER, + TRACE_REG_PERF_OPEN, + TRACE_REG_PERF_CLOSE, + + + + + + TRACE_REG_PERF_ADD, + TRACE_REG_PERF_DEL, + +}; + +struct trace_event_call; + + + +struct trace_event_fields { + const char *type; + union { + struct { + const char *name; + const int size; + const int align; + const int is_signed; + const int filter_type; + }; + int (*define_fields)(struct trace_event_call *); + }; +}; + +struct trace_event_class { + const char *system; + void *probe; + + void *perf_probe; + + int (*reg)(struct trace_event_call *event, + enum trace_reg type, void *data); + struct trace_event_fields *fields_array; + struct list_head *(*get_fields)(struct trace_event_call *); + struct list_head fields; + int (*raw_init)(struct trace_event_call *); +}; + +extern int trace_event_reg(struct trace_event_call *event, + enum trace_reg type, void *data); + +struct trace_event_buffer { + struct trace_buffer *buffer; + struct ring_buffer_event *event; + struct trace_event_file *trace_file; + void *entry; + unsigned long flags; + int pc; + struct pt_regs *regs; +}; + +void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, + struct trace_event_file *trace_file, + unsigned long len); + +void trace_event_buffer_commit(struct trace_event_buffer *fbuffer); + +enum { + TRACE_EVENT_FL_FILTERED_BIT, + TRACE_EVENT_FL_CAP_ANY_BIT, + TRACE_EVENT_FL_NO_SET_FILTER_BIT, + TRACE_EVENT_FL_IGNORE_ENABLE_BIT, + TRACE_EVENT_FL_TRACEPOINT_BIT, + TRACE_EVENT_FL_KPROBE_BIT, + TRACE_EVENT_FL_UPROBE_BIT, +}; +# 266 "./include/linux/trace_events.h" +enum { + TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), + TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), + TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), + TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), + 
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), + TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), + TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), +}; + + + +struct trace_event_call { + struct list_head list; + struct trace_event_class *class; + union { + char *name; + + struct tracepoint *tp; + }; + struct trace_event event; + char *print_fmt; + struct event_filter *filter; + void *mod; + void *data; +# 300 "./include/linux/trace_events.h" + int flags; + + + int perf_refcount; + struct hlist_head *perf_events; + struct bpf_prog_array *prog_array; + + int (*perf_perm)(struct trace_event_call *, + struct perf_event *); + +}; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_prog_array_valid(struct trace_event_call *call) +{ +# 332 "./include/linux/trace_events.h" + return !!({ do { extern void __compiletime_assert_1025(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(call->prog_array) == sizeof(char) || sizeof(call->prog_array) == sizeof(short) || sizeof(call->prog_array) == sizeof(int) || sizeof(call->prog_array) == sizeof(long)) || sizeof(call->prog_array) == sizeof(long long))) __compiletime_assert_1025(); } while (0); ({ typeof( _Generic((call->prog_array), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (call->prog_array))) __x = (*(const volatile typeof( _Generic((call->prog_array), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (call->prog_array))) *)&(call->prog_array)); do { } while (0); (typeof(call->prog_array))__x; }); }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char * +trace_event_name(struct trace_event_call *call) +{ + if (call->flags & TRACE_EVENT_FL_TRACEPOINT) + return call->tp ? 
call->tp->name : ((void *)0); + else + return call->name; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct list_head * +trace_get_fields(struct trace_event_call *event_call) +{ + if (!event_call->class->get_fields) + return &event_call->class->fields; + return event_call->class->get_fields(event_call); +} + +struct trace_array; +struct trace_subsystem_dir; + +enum { + EVENT_FILE_FL_ENABLED_BIT, + EVENT_FILE_FL_RECORDED_CMD_BIT, + EVENT_FILE_FL_RECORDED_TGID_BIT, + EVENT_FILE_FL_FILTERED_BIT, + EVENT_FILE_FL_NO_SET_FILTER_BIT, + EVENT_FILE_FL_SOFT_MODE_BIT, + EVENT_FILE_FL_SOFT_DISABLED_BIT, + EVENT_FILE_FL_TRIGGER_MODE_BIT, + EVENT_FILE_FL_TRIGGER_COND_BIT, + EVENT_FILE_FL_PID_FILTER_BIT, + EVENT_FILE_FL_WAS_ENABLED_BIT, +}; + +extern struct trace_event_file *trace_get_event_file(const char *instance, + const char *system, + const char *event); +extern void trace_put_event_file(struct trace_event_file *file); + + + +enum dynevent_type { + DYNEVENT_TYPE_SYNTH = 1, + DYNEVENT_TYPE_KPROBE, + DYNEVENT_TYPE_NONE, +}; + +struct dynevent_cmd; + +typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd); + +struct dynevent_cmd { + struct seq_buf seq; + const char *event_name; + unsigned int n_fields; + enum dynevent_type type; + dynevent_create_fn_t run_command; + void *private_data; +}; + +extern int dynevent_create(struct dynevent_cmd *cmd); + +extern int synth_event_delete(const char *name); + +extern void synth_event_cmd_init(struct dynevent_cmd *cmd, + char *buf, int maxlen); + +extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, + const char *name, + struct module *mod, ...); + + + + +struct synth_field_desc { + const char *type; + const char *name; +}; + +extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, + const char *name, + struct module *mod, + struct synth_field_desc *fields, + unsigned int n_fields); +extern int synth_event_create(const char *name, + struct synth_field_desc *fields, + unsigned int n_fields, struct module *mod); + +extern int synth_event_add_field(struct dynevent_cmd *cmd, + const char *type, + const char *name); +extern int synth_event_add_field_str(struct dynevent_cmd *cmd, + const char *type_name); +extern int synth_event_add_fields(struct dynevent_cmd *cmd, + struct synth_field_desc *fields, + unsigned int n_fields); + + + + +struct synth_event; + +struct synth_event_trace_state { + struct trace_event_buffer fbuffer; + struct synth_trace_event *entry; + struct trace_buffer *buffer; + struct synth_event *event; + unsigned int cur_field; + unsigned int n_u64; + bool disabled; + bool add_next; + bool add_name; +}; + +extern int synth_event_trace(struct trace_event_file *file, + unsigned int n_vals, ...); +extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals, + unsigned int n_vals); +extern int synth_event_trace_start(struct trace_event_file *file, + struct synth_event_trace_state *trace_state); +extern int synth_event_add_next_val(u64 val, + struct synth_event_trace_state *trace_state); +extern int synth_event_add_val(const char *field_name, u64 val, + struct synth_event_trace_state *trace_state); +extern int synth_event_trace_end(struct synth_event_trace_state *trace_state); + +extern int kprobe_event_delete(const char *name); + +extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd, + char *buf, int maxlen); + + + + + + + +extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, + bool kretprobe, + const char *name, + 
const char *loc, ...); + + + + + + + +extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...); +# 507 "./include/linux/trace_events.h" +enum { + EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), + EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT), + EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT), + EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT), + EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT), + EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT), + EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), + EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), + EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), + EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT), + EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT), +}; + +struct trace_event_file { + struct list_head list; + struct trace_event_call *event_call; + struct event_filter *filter; + struct dentry *dir; + struct trace_array *tr; + struct trace_subsystem_dir *system; + struct list_head triggers; +# 546 "./include/linux/trace_events.h" + unsigned long flags; + atomic_t sm_ref; + atomic_t tm_ref; +}; +# 576 "./include/linux/trace_events.h" +enum event_trigger_type { + ETT_NONE = (0), + ETT_TRACE_ONOFF = (1 << 0), + ETT_SNAPSHOT = (1 << 1), + ETT_STACKTRACE = (1 << 2), + ETT_EVENT_ENABLE = (1 << 3), + ETT_EVENT_HIST = (1 << 4), + ETT_HIST_ENABLE = (1 << 5), +}; + +extern int filter_match_preds(struct event_filter *filter, void *rec); + +extern enum event_trigger_type +event_triggers_call(struct trace_event_file *file, void *rec, + struct ring_buffer_event *event); +extern void +event_triggers_post_call(struct trace_event_file *file, + enum event_trigger_type tt); + +bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); +# 606 "./include/linux/trace_events.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +trace_trigger_soft_disabled(struct trace_event_file *file) +{ + unsigned long eflags = file->flags; + + if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { + if (eflags & EVENT_FILE_FL_TRIGGER_MODE) + event_triggers_call(file, ((void *)0), ((void *)0)); + if (eflags & EVENT_FILE_FL_SOFT_DISABLED) + return true; + if (eflags & EVENT_FILE_FL_PID_FILTER) + return trace_event_ignore_this_pid(file); + } + return false; +} + + +unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); +int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); +void perf_event_detach_bpf_prog(struct perf_event *event); +int perf_event_query_prog_array(struct perf_event *event, void *info); +int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); +int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); +struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); +void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); +int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, + u32 *fd_type, const char **buf, + u64 *probe_offset, u64 *probe_addr); +# 677 "./include/linux/trace_events.h" +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING, + FILTER_DYN_STRING, + FILTER_PTR_STRING, + FILTER_TRACE_FN, + FILTER_COMM, + FILTER_CPU, +}; + +extern int trace_event_raw_init(struct trace_event_call *call); +extern int trace_define_field(struct trace_event_call *call, const char *type, + const char *name, 
int offset, int size, + int is_signed, int filter_type); +extern int trace_add_event_call(struct trace_event_call *call); +extern int trace_remove_event_call(struct trace_event_call *call); +extern int trace_event_get_offsets(struct trace_event_call *call); + + + +int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); +int trace_set_clr_event(const char *system, const char *event, int set); +int trace_array_set_clr_event(struct trace_array *tr, const char *system, + const char *event, bool enable); +# 721 "./include/linux/trace_events.h" +struct perf_event; + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_perf_trace_regs; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs) perf_trace_regs; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_bpf_kprobe_override; extern __attribute__((section(".data..percpu" ""))) __typeof__(int) bpf_kprobe_override; + +extern int perf_trace_init(struct perf_event *event); +extern void perf_trace_destroy(struct perf_event *event); +extern int perf_trace_add(struct perf_event *event, int flags); +extern void perf_trace_del(struct perf_event *event, int flags); + +extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe); +extern void perf_kprobe_destroy(struct perf_event *event); +extern int bpf_get_kprobe_info(const struct perf_event *event, + u32 *fd_type, const char **symbol, + u64 *probe_offset, u64 *probe_addr, + bool perf_type_tracepoint); + + +extern int perf_uprobe_init(struct perf_event *event, + unsigned long ref_ctr_offset, bool is_retprobe); +extern void perf_uprobe_destroy(struct perf_event *event); +extern int bpf_get_uprobe_info(const struct perf_event *event, + u32 *fd_type, const char **filename, + u64 *probe_offset, bool perf_type_tracepoint); + +extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, + char *filter_str); +extern void ftrace_profile_free_filter(struct perf_event *event); +void perf_trace_buf_update(void *record, u16 type); +void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); + +void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); +void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); +void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3); +void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4); +void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5); +void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6); +void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7); +void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8); +void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9); +void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10); +void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10, u64 arg11); +void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2, + u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, + u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12); +void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, + struct 
trace_event_call *call, u64 count, + struct pt_regs *regs, struct hlist_head *head, + struct task_struct *task); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, + u64 count, struct pt_regs *regs, void *head, + struct task_struct *task) +{ + perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); +} +# 8 "./include/trace/syscall.h" 2 +# 25 "./include/trace/syscall.h" +struct syscall_metadata { + const char *name; + int syscall_nr; + int nb_args; + const char **types; + const char **args; + struct list_head enter_fields; + + struct trace_event_call *enter_event; + struct trace_event_call *exit_event; +}; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void syscall_tracepoint_update(struct task_struct *p) +{ + if (test_ti_thread_flag(((struct thread_info *)get_current()), 28)) + set_tsk_thread_flag(p, 28); + else + clear_tsk_thread_flag(p, 28); +} +# 86 "./include/linux/syscalls.h" 2 +# 130 "./include/linux/syscalls.h" +extern struct trace_event_class event_class_syscall_enter; +extern struct trace_event_class event_class_syscall_exit; +extern struct trace_event_functions enter_syscall_print_funcs; +extern struct trace_event_functions exit_syscall_print_funcs; +# 191 "./include/linux/syscalls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_syscall_trace_event(struct trace_event_call *tp_event) +{ + return tp_event->class == &event_class_syscall_enter || + tp_event->class == &event_class_syscall_exit; +} +# 259 "./include/linux/syscalls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void addr_limit_user_check(void) +{ + + if (!test_ti_thread_flag(((struct thread_info *)get_current()), 31)) + return; + + + if (check_data_corruption(({ bool corruption = __builtin_expect(!!(!(((get_current()->thread.addr_limit)).seg == (((mm_segment_t) { (((1UL << ((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+ 2) & 31))|(1<<(( 3*32+ 3) & 31))|(1<<(( 3*32+ 1) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & (0|0|0|0) )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); 
})))) ? 0 : ( __builtin_constant_p((__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p((16*32+16)) && ( ((((16*32+16))>>5)==(0) && (1UL<<(((16*32+16))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || ((((16*32+16))>>5)==(1) && (1UL<<(((16*32+16))&31) & ((1<<(( 1*32+29) & 31))|0) )) || ((((16*32+16))>>5)==(2) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(3) && (1UL<<(((16*32+16))&31) & ((1<<(( 3*32+20) & 31))) )) || ((((16*32+16))>>5)==(4) && (1UL<<(((16*32+16))&31) & (0) )) || ((((16*32+16))>>5)==(5) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(6) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(7) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(8) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(9) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(10) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(11) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(12) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(13) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(14) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(15) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(16) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(17) && (1UL<<(((16*32+16))&31) & 0 )) || ((((16*32+16))>>5)==(18) && (1UL<<(((16*32+16))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit((16*32+16), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has((16*32+16)) )) ? 
56 : 47)) - ((1UL) << 12))) })).seg)), 0); if (corruption) { if (1) { printk("\001" "3" "Invalid address limit on user-mode return"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1026)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/syscalls.h"), "i" (266), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1027)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } else ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1028)); }); __warn_printk("Invalid address limit on user-mode return"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1029)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/syscalls.h"), "i" (266), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1030)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1031)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1032)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } corruption; })) + ) + force_sig(9); + + + clear_ti_thread_flag(((struct thread_info *)get_current()), 31); + +} +# 1239 "./include/linux/syscalls.h" +int ksys_umount(char *name, int flags); +int ksys_dup(unsigned int fildes); +int ksys_chroot(const char *filename); +ssize_t ksys_write(unsigned int fd, const char *buf, size_t count); +int ksys_chdir(const char *filename); +int ksys_fchmod(unsigned int fd, umode_t mode); +int ksys_fchown(unsigned int fd, uid_t user, gid_t group); +int ksys_getdents64(unsigned int fd, struct linux_dirent64 *dirent, + unsigned int count); +int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); +off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence); +ssize_t ksys_read(unsigned int fd, char *buf, size_t count); +void ksys_sync(void); +int ksys_unshare(unsigned long unshare_flags); +int ksys_setsid(void); +int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes, + unsigned int flags); +ssize_t ksys_pread64(unsigned int fd, char *buf, size_t count, + loff_t pos); +ssize_t ksys_pwrite64(unsigned int fd, const char *buf, + size_t count, loff_t pos); +int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len); + +int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); + + + + + + + +unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, + unsigned long prot, unsigned 
long flags, + unsigned long fd, unsigned long pgoff); +ssize_t ksys_readahead(int fd, loff_t offset, size_t count); +int ksys_ipc(unsigned int call, int first, unsigned long second, + unsigned long third, void * ptr, long fifth); +int compat_ksys_ipc(u32 call, int first, int second, + u32 third, u32 ptr, u32 fifth); + + + + + +extern long do_unlinkat(int dfd, struct filename *name); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_unlink(const char *pathname) +{ + return do_unlinkat(-100, getname(pathname)); +} + +extern long do_rmdir(int dfd, const char *pathname); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_rmdir(const char *pathname) +{ + return do_rmdir(-100, pathname); +} + +extern long do_mkdirat(int dfd, const char *pathname, umode_t mode); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_mkdir(const char *pathname, umode_t mode) +{ + return do_mkdirat(-100, pathname, mode); +} + +extern long do_symlinkat(const char *oldname, int newdfd, + const char *newname); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_symlink(const char *oldname, + const char *newname) +{ + return do_symlinkat(oldname, -100, newname); +} + +extern long do_mknodat(int dfd, const char *filename, umode_t mode, + unsigned int dev); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_mknod(const char *filename, umode_t mode, + unsigned int dev) +{ + return do_mknodat(-100, filename, mode, dev); +} + +extern int do_linkat(int olddfd, const char *oldname, int newdfd, + const char *newname, int flags); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_link(const char *oldname, + const char *newname) +{ + return do_linkat(-100, oldname, -100, newname, 0); +} + +extern int do_fchmodat(int dfd, const char *filename, umode_t mode); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ksys_chmod(const char *filename, umode_t mode) +{ + return do_fchmodat(-100, filename, mode); +} + +long do_faccessat(int dfd, const char *filename, int mode, int flags); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_access(const char *filename, int mode) +{ + return do_faccessat(-100, filename, mode, 0); +} + +extern int do_fchownat(int dfd, const char *filename, uid_t user, + gid_t group, int flag); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_chown(const char *filename, uid_t user, + gid_t group) +{ + return do_fchownat(-100, filename, user, group, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_lchown(const char *filename, uid_t user, + gid_t group) +{ + return do_fchownat(-100, filename, user, group, + 0x100); +} + +extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_ftruncate(unsigned int fd, unsigned long length) +{ + 
return do_sys_ftruncate(fd, length, 1); +} + +extern int __close_fd(struct files_struct *files, unsigned int fd); + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ksys_close(unsigned int fd) +{ + return __close_fd(get_current()->files, fd); +} + +extern long do_sys_open(int dfd, const char *filename, int flags, + umode_t mode); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_open(const char *filename, int flags, + umode_t mode) +{ + if ((!0)) + flags |= 00100000; + return do_sys_open(-100, filename, flags, mode); +} + +extern long do_sys_truncate(const char *pathname, loff_t length); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long ksys_truncate(const char *pathname, loff_t length) +{ + return do_sys_truncate(pathname, length); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int ksys_personality(unsigned int personality) +{ + unsigned int old = get_current()->personality; + + if (personality != 0xffffffff) + (get_current()->personality = (personality)); + + return old; +} + + +long ksys_semtimedop(int semid, struct sembuf *tsops, + unsigned int nsops, + const struct __kernel_timespec *timeout); +long ksys_semget(key_t key, int nsems, int semflg); +long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); +long ksys_msgget(key_t key, int msgflg); +long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds *buf); +long ksys_msgrcv(int msqid, struct msgbuf *msgp, size_t msgsz, + long msgtyp, int msgflg); +long ksys_msgsnd(int msqid, struct msgbuf *msgp, size_t msgsz, + int msgflg); +long ksys_shmget(key_t key, size_t size, int shmflg); +long ksys_shmdt(char *shmaddr); +long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds *buf); +long compat_ksys_semtimedop(int semid, struct sembuf *tsems, + unsigned int nsops, + const struct old_timespec32 *timeout); +# 46 "fs/io_uring.c" 2 + +# 1 "./include/net/compat.h" 1 + + + + + +struct sock; + + + + + +struct compat_msghdr { + compat_uptr_t msg_name; + compat_int_t msg_namelen; + compat_uptr_t msg_iov; + compat_size_t msg_iovlen; + compat_uptr_t msg_control; + compat_size_t msg_controllen; + compat_uint_t msg_flags; +}; + +struct compat_mmsghdr { + struct compat_msghdr msg_hdr; + compat_uint_t msg_len; +}; + +struct compat_cmsghdr { + compat_size_t cmsg_len; + compat_int_t cmsg_level; + compat_int_t cmsg_type; +}; + +struct compat_rtentry { + u32 rt_pad1; + struct sockaddr rt_dst; + struct sockaddr rt_gateway; + struct sockaddr rt_genmask; + unsigned short rt_flags; + short rt_pad2; + u32 rt_pad3; + unsigned char rt_tos; + unsigned char rt_class; + short rt_pad4; + short rt_metric; + compat_uptr_t rt_dev; + u32 rt_mtu; + u32 rt_window; + unsigned short rt_irtt; +}; +# 59 "./include/net/compat.h" +int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr *umsg, + struct sockaddr **save_addr, compat_uptr_t *ptr, + compat_size_t *len); +int get_compat_msghdr(struct msghdr *, struct compat_msghdr *, + struct sockaddr **, struct iovec **); +struct sock_fprog *get_compat_bpf_fprog(char *optval); +int put_cmsg_compat(struct msghdr*, int, int, int, void *); + +int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, + unsigned char *, int); + +struct compat_group_req { + __u32 gr_interface; + struct 
__kernel_sockaddr_storage gr_group + __attribute__((__aligned__(4))); +} __attribute__((__packed__)); + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group + __attribute__((__aligned__(4))); + struct __kernel_sockaddr_storage gsr_source + __attribute__((__aligned__(4))); +} __attribute__((__packed__)); + +struct compat_group_filter { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group + __attribute__((__aligned__(4))); + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist[1] + __attribute__((__aligned__(4))); +} __attribute__((__packed__)); +# 48 "fs/io_uring.c" 2 + + + + + + + +# 1 "./include/linux/fdtable.h" 1 +# 13 "./include/linux/fdtable.h" +# 1 "./include/linux/nospec.h" 1 +# 10 "./include/linux/nospec.h" +struct task_struct; +# 62 "./include/linux/nospec.h" +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl); + +void arch_seccomp_spec_mitigate(struct task_struct *task); +# 14 "./include/linux/fdtable.h" 2 +# 26 "./include/linux/fdtable.h" +struct fdtable { + unsigned int max_fds; + struct file **fd; + unsigned long *close_on_exec; + unsigned long *open_fds; + unsigned long *full_fds_bits; + struct callback_head rcu; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool close_on_exec(unsigned int fd, const struct fdtable *fdt) +{ + return test_bit(fd, fdt->close_on_exec); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fd_is_open(unsigned int fd, const struct fdtable *fdt) +{ + return test_bit(fd, fdt->open_fds); +} + + + + +struct files_struct { + + + + atomic_t count; + bool resize_in_progress; + wait_queue_head_t resize_wait; + + struct fdtable *fdt; + struct fdtable fdtab; + + + + spinlock_t file_lock __attribute__((__aligned__((1 << (6))))); + unsigned int next_fd; + unsigned long close_on_exec_init[1]; + unsigned long open_fds_init[1]; + unsigned long full_fds_bits_init[1]; + struct file * fd_array[64]; +}; + +struct file_operations; +struct vfsmount; +struct dentry; +# 82 "./include/linux/fdtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct file *__fcheck_files(struct files_struct *files, unsigned int fd) +{ + struct fdtable *fdt = ({ typeof(files->fdt) ________p1 = ({ do { extern void __compiletime_assert_1033(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(files->fdt) == sizeof(char) || sizeof(files->fdt) == sizeof(short) || sizeof(files->fdt) == sizeof(int) || sizeof(files->fdt) == sizeof(long)) || sizeof(files->fdt) == sizeof(long long))) __compiletime_assert_1033(); } while (0); ({ typeof( _Generic((files->fdt), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (files->fdt))) __x = (*(const volatile typeof( _Generic((files->fdt), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: 
(unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (files->fdt))) *)&(files->fdt)); do { } while (0); (typeof(files->fdt))__x; }); }); ((typeof(*files->fdt) *)(________p1)); }); + + if (fd < fdt->max_fds) { + fd = ({ typeof(fd) _i = (fd); typeof(fdt->max_fds) _s = (fdt->max_fds); unsigned long _mask = array_index_mask_nospec(_i, _s); do { extern void __compiletime_assert_1034(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(_i) > sizeof(long)"))); if (!(!(sizeof(_i) > sizeof(long)))) __compiletime_assert_1034(); } while (0); do { extern void __compiletime_assert_1035(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(_s) > sizeof(long)"))); if (!(!(sizeof(_s) > sizeof(long)))) __compiletime_assert_1035(); } while (0); (typeof(_i)) (_i & _mask); }); + return ({ typeof(fdt->fd[fd]) ________p1 = ({ do { extern void __compiletime_assert_1036(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(fdt->fd[fd]) == sizeof(char) || sizeof(fdt->fd[fd]) == sizeof(short) || sizeof(fdt->fd[fd]) == sizeof(int) || sizeof(fdt->fd[fd]) == sizeof(long)) || sizeof(fdt->fd[fd]) == sizeof(long long))) __compiletime_assert_1036(); } while (0); ({ typeof( _Generic((fdt->fd[fd]), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (fdt->fd[fd]))) __x = (*(const volatile typeof( _Generic((fdt->fd[fd]), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (fdt->fd[fd]))) *)&(fdt->fd[fd])); do { } while (0); (typeof(fdt->fd[fd]))__x; }); }); ((typeof(*fdt->fd[fd]) *)(________p1)); }); + } + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct file *fcheck_files(struct files_struct *files, unsigned int fd) +{ + do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!rcu_read_lock_held() && !lock_is_held(&(&files->file_lock)->dep_map))) { __warned = true; lockdep_rcu_suspicious("include/linux/fdtable.h", 95, "suspicious rcu_dereference_check() usage"); } } while (0) + + ; + return __fcheck_files(files, fd); +} + + + + + + +struct task_struct; + +struct files_struct *get_files_struct(struct task_struct *); +void put_files_struct(struct files_struct *fs); +void reset_files_struct(struct files_struct *); +int unshare_files(struct files_struct **); +struct files_struct *dup_fd(struct files_struct *, int *) ; +void do_close_on_exec(struct files_struct *); +int iterate_fd(struct files_struct *, unsigned, + int (*)(const void *, struct file *, unsigned), + const void *); + +extern int __alloc_fd(struct files_struct *files, + unsigned start, unsigned end, unsigned flags); +extern void __fd_install(struct files_struct *files, + unsigned int fd, struct file *file); 
+extern int __close_fd(struct files_struct *files, + unsigned int fd); +extern int __close_fd_get_file(unsigned int fd, struct file **res); + +extern struct kmem_cache *files_cachep; +# 56 "fs/io_uring.c" 2 + +# 1 "./include/linux/mman.h" 1 +# 9 "./include/linux/mman.h" +# 1 "./include/uapi/linux/mman.h" 1 + + + + +# 1 "./arch/x86/include/uapi/asm/mman.h" 1 +# 29 "./arch/x86/include/uapi/asm/mman.h" +# 1 "./include/uapi/asm-generic/mman.h" 1 + + + + +# 1 "./include/uapi/asm-generic/mman-common.h" 1 +# 6 "./include/uapi/asm-generic/mman.h" 2 +# 30 "./arch/x86/include/uapi/asm/mman.h" 2 +# 6 "./include/uapi/linux/mman.h" 2 +# 10 "./include/linux/mman.h" 2 +# 53 "./include/linux/mman.h" +extern int sysctl_overcommit_memory; +extern int sysctl_overcommit_ratio; +extern unsigned long sysctl_overcommit_kbytes; +extern struct percpu_counter vm_committed_as; + + +extern s32 vm_committed_as_batch; + + + + +unsigned long vm_memory_committed(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vm_acct_memory(long pages) +{ + percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vm_unacct_memory(long pages) +{ + vm_acct_memory(-pages); +} +# 95 "./include/linux/mman.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_validate_prot(unsigned long prot, unsigned long addr) +{ + return (prot & ~(0x1 | 0x2 | 0x4 | 0x8)) == 0; +} +# 116 "./include/linux/mman.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +calc_vm_prot_bits(unsigned long prot, unsigned long pkey) +{ + return ((!(0x1) || !(0x00000001)) ? 0 : ((0x1) <= (0x00000001) ? ((prot) & (0x1)) * ((0x00000001) / (0x1)) : ((prot) & (0x1)) / ((0x1) / (0x00000001)))) | + ((!(0x2) || !(0x00000002)) ? 0 : ((0x2) <= (0x00000002) ? ((prot) & (0x2)) * ((0x00000002) / (0x2)) : ((prot) & (0x2)) / ((0x2) / (0x00000002)))) | + ((!(0x4) || !(0x00000004)) ? 0 : ((0x4) <= (0x00000004) ? ((prot) & (0x4)) * ((0x00000004) / (0x4)) : ((prot) & (0x4)) / ((0x4) / (0x00000004)))) | + ( ((pkey) & 0x1 ? ((((1UL))) << (32)) : 0) | ((pkey) & 0x2 ? ((((1UL))) << (33)) : 0) | ((pkey) & 0x4 ? ((((1UL))) << (34)) : 0) | ((pkey) & 0x8 ? ((((1UL))) << (35)) : 0)); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long +calc_vm_flag_bits(unsigned long flags) +{ + return ((!(0x0100) || !(0x00000100)) ? 0 : ((0x0100) <= (0x00000100) ? ((flags) & (0x0100)) * ((0x00000100) / (0x0100)) : ((flags) & (0x0100)) / ((0x0100) / (0x00000100)))) | + ((!(0x0800) || !(0x00000800)) ? 0 : ((0x0800) <= (0x00000800) ? ((flags) & (0x0800)) * ((0x00000800) / (0x0800)) : ((flags) & (0x0800)) / ((0x0800) / (0x00000800)))) | + ((!(0x2000) || !(0x00002000)) ? 0 : ((0x2000) <= (0x00002000) ? ((flags) & (0x2000)) * ((0x00002000) / (0x2000)) : ((flags) & (0x2000)) / ((0x2000) / (0x00002000)))) | + ((!(0x080000) || !(0x00800000)) ? 0 : ((0x080000) <= (0x00800000) ? 
((flags) & (0x080000)) * ((0x00800000) / (0x080000)) : ((flags) & (0x080000)) / ((0x080000) / (0x00800000)))); +} + +unsigned long vm_commit_limit(void); +# 58 "fs/io_uring.c" 2 + + + +# 1 "./include/linux/blkdev.h" 1 + + + + + +# 1 "./include/linux/sched/clock.h" 1 +# 15 "./include/linux/sched/clock.h" +extern unsigned long long __attribute__((no_instrument_function)) sched_clock(void); + + + + +extern u64 running_clock(void); +extern u64 sched_clock_cpu(int cpu); + + +extern void sched_clock_init(void); +# 53 "./include/linux/sched/clock.h" +extern int sched_clock_stable(void); +extern void clear_sched_clock_stable(void); + + + + + +extern u64 __sched_clock_offset; + +extern void sched_clock_tick(void); +extern void sched_clock_tick_stable(void); +extern void sched_clock_idle_sleep_event(void); +extern void sched_clock_idle_wakeup_event(void); +# 77 "./include/linux/sched/clock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 cpu_clock(int cpu) +{ + return sched_clock_cpu(cpu); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 local_clock(void) +{ + return sched_clock_cpu(({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" 
(cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })); +} +# 94 "./include/linux/sched/clock.h" +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +# 7 "./include/linux/blkdev.h" 2 + + + +# 1 "./include/uapi/linux/major.h" 1 +# 11 "./include/linux/blkdev.h" 2 +# 1 "./include/linux/genhd.h" 1 +# 19 "./include/linux/genhd.h" +# 1 "./include/linux/blk_types.h" 1 +# 10 "./include/linux/blk_types.h" +# 1 "./include/linux/bvec.h" 1 +# 27 "./include/linux/bvec.h" +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; + +struct bvec_iter { + sector_t bi_sector; + + unsigned int bi_size; + + unsigned int bi_idx; + + unsigned int bi_bvec_done; + +}; + +struct bvec_iter_all { + struct bio_vec bv; + int idx; + unsigned done; +}; +# 96 "./include/linux/bvec.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bvec_iter_advance(const struct bio_vec *bv, + struct bvec_iter *iter, unsigned bytes) +{ + unsigned int idx = iter->bi_idx; + + if (({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(bytes > iter->bi_size); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1037)); }); __warn_printk("Attempted to advance past end of bvec iter\n"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1038)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/bvec.h"), "i" (101), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1039)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1040)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1041)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }) + ) { + iter->bi_size = 0; + return false; + } + + iter->bi_size -= bytes; + bytes += iter->bi_bvec_done; + + while (bytes && bytes >= bv[idx].bv_len) { + bytes -= bv[idx].bv_len; + idx++; + } + + iter->bi_idx = idx; + iter->bi_bvec_done = bytes; + return true; +} +# 135 "./include/linux/bvec.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) +{ + iter_all->done = 0; + iter_all->idx = 0; + + return &iter_all->bv; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bvec_advance(const struct bio_vec *bvec, + struct bvec_iter_all *iter_all) +{ + struct bio_vec *bv = &iter_all->bv; + + if (iter_all->done) { + bv->bv_page++; + 
bv->bv_offset = 0; + } else { + bv->bv_page = bvec->bv_page + (bvec->bv_offset >> 12); + bv->bv_offset = bvec->bv_offset & ~(~(((1UL) << 12)-1)); + } + bv->bv_len = __builtin_choose_expr(((!!(sizeof((typeof((unsigned int)(((1UL) << 12) - bv->bv_offset)) *)1 == (typeof((unsigned int)(bvec->bv_len - iter_all->done)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(((1UL) << 12) - bv->bv_offset)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(bvec->bv_len - iter_all->done)) * 0l)) : (int *)8))))), (((unsigned int)(((1UL) << 12) - bv->bv_offset)) < ((unsigned int)(bvec->bv_len - iter_all->done)) ? ((unsigned int)(((1UL) << 12) - bv->bv_offset)) : ((unsigned int)(bvec->bv_len - iter_all->done))), ({ typeof((unsigned int)(((1UL) << 12) - bv->bv_offset)) __UNIQUE_ID___x1042 = ((unsigned int)(((1UL) << 12) - bv->bv_offset)); typeof((unsigned int)(bvec->bv_len - iter_all->done)) __UNIQUE_ID___y1043 = ((unsigned int)(bvec->bv_len - iter_all->done)); ((__UNIQUE_ID___x1042) < (__UNIQUE_ID___y1043) ? (__UNIQUE_ID___x1042) : (__UNIQUE_ID___y1043)); })) + ; + iter_all->done += bv->bv_len; + + if (iter_all->done == bvec->bv_len) { + iter_all->idx++; + iter_all->done = 0; + } +} +# 11 "./include/linux/blk_types.h" 2 + + +struct bio_set; +struct bio; +struct bio_integrity_payload; +struct page; +struct block_device; +struct io_context; +struct cgroup_subsys_state; +typedef void (bio_end_io_t) (struct bio *); +struct bio_crypt_ctx; +# 30 "./include/linux/blk_types.h" +typedef u8 blk_status_t; +# 91 "./include/linux/blk_types.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_path_error(blk_status_t error) +{ + switch (error) { + case (( blk_status_t)1): + case (( blk_status_t)3): + case (( blk_status_t)5): + case (( blk_status_t)6): + case (( blk_status_t)7): + case (( blk_status_t)8): + return false; + } + + + return true; +} +# 125 "./include/linux/blk_types.h" +struct bio_issue { + u64 value; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 __bio_issue_time(u64 time) +{ + return time & ((1ULL << ((64 - 1) - 12)) - 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 bio_issue_time(struct bio_issue *issue) +{ + return __bio_issue_time(issue->value); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) sector_t bio_issue_size(struct bio_issue *issue) +{ + return ((issue->value & (((1ULL << 12) - 1) << ((64 - 1) - 12))) >> ((64 - 1) - 12)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_issue_init(struct bio_issue *issue, + sector_t size) +{ + size &= (1ULL << 12) - 1; + issue->value = ((issue->value & (~((1ULL << (64 - 1)) - 1))) | + (ktime_get_ns() & ((1ULL << ((64 - 1) - 12)) - 1)) | + ((u64)size << ((64 - 1) - 12))); +} + + + + + +struct bio { + struct bio *bi_next; + struct gendisk *bi_disk; + unsigned int bi_opf; + + + + unsigned short bi_flags; + unsigned short bi_ioprio; + unsigned short bi_write_hint; + blk_status_t bi_status; + u8 bi_partno; + atomic_t __bi_remaining; + + struct bvec_iter bi_iter; + + bio_end_io_t *bi_end_io; + + void *bi_private; + + + + + + + + struct blkcg_gq *bi_blkg; + struct bio_issue bi_issue; + + u64 bi_iocost_cost; + + + + + struct 
bio_crypt_ctx *bi_crypt_context; + + + union { + + struct bio_integrity_payload *bi_integrity; + + }; + + unsigned short bi_vcnt; + + + + + + unsigned short bi_max_vecs; + + atomic_t __bi_cnt; + + struct bio_vec *bi_io_vec; + + struct bio_set *bi_pool; + + + + + + + struct bio_vec bi_inline_vecs[]; +}; + + + + + + +enum { + BIO_NO_PAGE_REF, + BIO_CLONED, + BIO_BOUNCED, + BIO_USER_MAPPED, + BIO_NULL_MAPPED, + BIO_WORKINGSET, + BIO_QUIET, + BIO_CHAIN, + BIO_REFFED, + BIO_THROTTLED, + + BIO_TRACE_COMPLETION, + + BIO_CGROUP_ACCT, + BIO_TRACKED, + BIO_FLAG_LAST +}; +# 273 "./include/linux/blk_types.h" +typedef __u32 blk_mq_req_flags_t; +# 292 "./include/linux/blk_types.h" +enum req_opf { + + REQ_OP_READ = 0, + + REQ_OP_WRITE = 1, + + REQ_OP_FLUSH = 2, + + REQ_OP_DISCARD = 3, + + REQ_OP_SECURE_ERASE = 5, + + REQ_OP_ZONE_RESET = 6, + + REQ_OP_WRITE_SAME = 7, + + REQ_OP_ZONE_RESET_ALL = 8, + + REQ_OP_WRITE_ZEROES = 9, + + REQ_OP_ZONE_OPEN = 10, + + REQ_OP_ZONE_CLOSE = 11, + + REQ_OP_ZONE_FINISH = 12, + + REQ_OP_ZONE_APPEND = 13, + + + REQ_OP_SCSI_IN = 32, + REQ_OP_SCSI_OUT = 33, + + REQ_OP_DRV_IN = 34, + REQ_OP_DRV_OUT = 35, + + REQ_OP_LAST, +}; + +enum req_flag_bits { + __REQ_FAILFAST_DEV = + 8, + __REQ_FAILFAST_TRANSPORT, + __REQ_FAILFAST_DRIVER, + __REQ_SYNC, + __REQ_META, + __REQ_PRIO, + __REQ_NOMERGE, + __REQ_IDLE, + __REQ_INTEGRITY, + __REQ_FUA, + __REQ_PREFLUSH, + __REQ_RAHEAD, + __REQ_BACKGROUND, + __REQ_NOWAIT, + + + + + + + + __REQ_CGROUP_PUNT, + + + __REQ_NOUNMAP, + + __REQ_HIPRI, + + + __REQ_DRV, + __REQ_SWAP, + __REQ_NR_BITS, +}; +# 394 "./include/linux/blk_types.h" +enum stat_group { + STAT_READ, + STAT_WRITE, + STAT_DISCARD, + STAT_FLUSH, + + NR_STAT_GROUPS +}; + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_set_op_attrs(struct bio *bio, unsigned op, + unsigned op_flags) +{ + bio->bi_opf = op | op_flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool op_is_write(unsigned int op) +{ + return (op & 1); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool op_is_flush(unsigned int op) +{ + return op & ((1ULL << __REQ_FUA) | (1ULL << __REQ_PREFLUSH)); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool op_is_sync(unsigned int op) +{ + return (op & ((1 << 8) - 1)) == REQ_OP_READ || + (op & ((1ULL << __REQ_SYNC) | (1ULL << __REQ_FUA) | (1ULL << __REQ_PREFLUSH))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool op_is_discard(unsigned int op) +{ + return (op & ((1 << 8) - 1)) == REQ_OP_DISCARD; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool op_is_zone_mgmt(enum req_opf op) +{ + switch (op & ((1 << 8) - 1)) { + case REQ_OP_ZONE_RESET: + case REQ_OP_ZONE_OPEN: + case REQ_OP_ZONE_CLOSE: + case REQ_OP_ZONE_FINISH: + return true; + default: + return false; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int op_stat_group(unsigned int op) +{ + if (op_is_discard(op)) + return STAT_DISCARD; + return op_is_write(op); +} + +typedef unsigned int blk_qc_t; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) bool blk_qc_t_valid(blk_qc_t cookie) +{ + return cookie != -1U && cookie != -2U; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) +{ + return (cookie & ~(1U << 31)) >> 16; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_qc_t_to_tag(blk_qc_t cookie) +{ + return cookie & ((1u << 16) - 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_qc_t_is_internal(blk_qc_t cookie) +{ + return (cookie & (1U << 31)) != 0; +} + +struct blk_rq_stat { + u64 mean; + u64 min; + u64 max; + u32 nr_samples; + u64 batch; +}; +# 20 "./include/linux/genhd.h" 2 +# 29 "./include/linux/genhd.h" +extern struct device_type part_type; +extern struct class block_class; +# 49 "./include/linux/genhd.h" +struct partition_meta_info { + char uuid[(36 + 1)]; + u8 volname[64]; +}; + +struct hd_struct { + sector_t start_sect; + + + + + + sector_t nr_sects; + + + + unsigned long stamp; + struct disk_stats *dkstats; + struct percpu_ref ref; + + sector_t alignment_offset; + unsigned int discard_alignment; + struct device __dev; + struct kobject *holder_dir; + int policy, partno; + struct partition_meta_info *info; + + int make_it_fail; + + struct rcu_work rcu_work; +}; +# 140 "./include/linux/genhd.h" +enum { + DISK_EVENT_MEDIA_CHANGE = 1 << 0, + DISK_EVENT_EJECT_REQUEST = 1 << 1, +}; + +enum { + + DISK_EVENT_FLAG_POLL = 1 << 0, + + DISK_EVENT_FLAG_UEVENT = 1 << 1, +}; + +struct disk_part_tbl { + struct callback_head callback_head; + int len; + struct hd_struct *last_lookup; + struct hd_struct *part[]; +}; + +struct disk_events; +struct badblocks; + +struct blk_integrity { + const struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; +}; + +struct gendisk { + + + + int major; + int first_minor; + int minors; + + + char disk_name[32]; + + unsigned short events; + unsigned short event_flags; + + + + + + + struct disk_part_tbl *part_tbl; + struct hd_struct part0; + + const struct block_device_operations *fops; + struct request_queue *queue; + void *private_data; + + int flags; + struct rw_semaphore lookup_sem; + struct kobject *slave_dir; + + struct timer_rand_state *random; + atomic_t sync_io; + struct disk_events *ev; + + struct kobject integrity_kobj; + + + struct cdrom_device_info *cdi; + + int node_id; + struct badblocks *bb; + struct lockdep_map lockdep_map; +}; + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct gendisk *part_to_disk(struct hd_struct *part) +{ + if (__builtin_expect(!!(part), 1)) { + if (part->partno) + return ({ void *__mptr = (void *)(((&((part)->__dev))->parent)); do { extern void __compiletime_assert_1044(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(((&((part)->__dev))->parent))), typeof(((struct gendisk *)0)->part0.__dev)) && !__builtin_types_compatible_p(typeof(*(((&((part)->__dev))->parent))), typeof(void))))) __compiletime_assert_1044(); } while (0); ((struct gendisk *)(__mptr - __builtin_offsetof(struct gendisk, part0.__dev))); }); + else + return ({ void *__mptr = (void *)(((&((part)->__dev)))); do { extern void 
__compiletime_assert_1045(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(((&((part)->__dev))))), typeof(((struct gendisk *)0)->part0.__dev)) && !__builtin_types_compatible_p(typeof(*(((&((part)->__dev))))), typeof(void))))) __compiletime_assert_1045(); } while (0); ((struct gendisk *)(__mptr - __builtin_offsetof(struct gendisk, part0.__dev))); }); + } + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int disk_max_parts(struct gendisk *disk) +{ + if (disk->flags & 0x0040) + return 256; + return disk->minors; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool disk_part_scan_enabled(struct gendisk *disk) +{ + return disk_max_parts(disk) > 1 && + !(disk->flags & 0x0200); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dev_t disk_devt(struct gendisk *disk) +{ + return (((disk->major) << 20) | (disk->first_minor)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dev_t part_devt(struct hd_struct *part) +{ + return (&((part)->__dev))->devt; +} + +extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); +extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void disk_put_part(struct hd_struct *part) +{ + if (__builtin_expect(!!(part), 1)) + put_device((&((part)->__dev))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hd_sects_seq_init(struct hd_struct *p) +{ + + + +} +# 278 "./include/linux/genhd.h" +struct disk_part_iter { + struct gendisk *disk; + struct hd_struct *part; + int idx; + unsigned int flags; +}; + +extern void disk_part_iter_init(struct disk_part_iter *piter, + struct gendisk *disk, unsigned int flags); +extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter); +extern void disk_part_iter_exit(struct disk_part_iter *piter); +extern bool disk_has_partitions(struct gendisk *disk); + + +extern void device_add_disk(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void add_disk(struct gendisk *disk) +{ + device_add_disk(((void *)0), disk, ((void *)0)); +} +extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void add_disk_no_queue_reg(struct gendisk *disk) +{ + device_add_disk_no_queue_reg(((void *)0), disk); +} + +extern void del_gendisk(struct gendisk *gp); +extern struct gendisk *get_gendisk(dev_t dev, int *partno); +extern struct block_device *bdget_disk(struct gendisk *disk, int partno); + +extern void set_device_ro(struct block_device *bdev, int flag); +extern void set_disk_ro(struct gendisk *disk, int flag); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_disk_ro(struct gendisk *disk) +{ + return disk->part0.policy; +} + +extern void disk_block_events(struct gendisk *disk); +extern 
void disk_unblock_events(struct gendisk *disk); +extern void disk_flush_events(struct gendisk *disk, unsigned int mask); +extern void set_capacity_revalidate_and_notify(struct gendisk *disk, + sector_t size, bool revalidate); +extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); + + +extern void add_disk_randomness(struct gendisk *disk) ; +extern void rand_initialize_disk(struct gendisk *disk); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) sector_t get_start_sect(struct block_device *bdev) +{ + return bdev->bd_part->start_sect; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) sector_t get_capacity(struct gendisk *disk) +{ + return disk->part0.nr_sects; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_capacity(struct gendisk *disk, sector_t size) +{ + disk->part0.nr_sects = size; +} + +extern dev_t blk_lookup_devt(const char *name, int partno); + +int bdev_disk_changed(struct block_device *bdev, bool invalidate); +int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); +int blk_drop_partitions(struct block_device *bdev); +extern void printk_all_partitions(void); + +extern struct gendisk *__alloc_disk_node(int minors, int node_id); +extern struct kobject *get_disk_and_module(struct gendisk *disk); +extern void put_disk(struct gendisk *disk); +extern void put_disk_and_module(struct gendisk *disk); +extern void blk_register_region(dev_t devt, unsigned long range, + struct module *module, + struct kobject *(*probe)(dev_t, int *, void *), + int (*lock)(dev_t, void *), + void *data); +extern void blk_unregister_region(dev_t devt, unsigned long range); +# 12 "./include/linux/blkdev.h" 2 + + + + +# 1 "./include/linux/pagemap.h" 1 +# 11 "./include/linux/pagemap.h" +# 1 "./include/linux/highmem.h" 1 +# 12 "./include/linux/highmem.h" +# 1 "./arch/x86/include/asm/cacheflush.h" 1 + + + + + + + +# 1 "./include/asm-generic/cacheflush.h" 1 +# 10 "./include/asm-generic/cacheflush.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_cache_all(void) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_cache_mm(struct mm_struct *mm) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_cache_dup_mm(struct mm_struct *mm) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, + unsigned long end) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_cache_page(struct vm_area_struct *vma, + unsigned long vmaddr, + unsigned long pfn) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_dcache_page(struct page *page) +{ +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_dcache_mmap_lock(struct address_space *mapping) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void flush_dcache_mmap_unlock(struct address_space *mapping) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_icache_range(unsigned long start, unsigned long end) +{ +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_icache_page(struct vm_area_struct *vma, + struct page *page) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_icache_user_page(struct vm_area_struct *vma, + struct page *page, + unsigned long addr, int len) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_cache_vmap(unsigned long start, unsigned long end) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_cache_vunmap(unsigned long start, unsigned long end) +{ +} +# 9 "./arch/x86/include/asm/cacheflush.h" 2 + + +void clflush_cache_range(void *addr, unsigned int size); +# 13 "./include/linux/highmem.h" 2 + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) +{ +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_kernel_dcache_page(struct page *page) +{ +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flush_kernel_vmap_range(void *vaddr, int size) +{ +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void invalidate_kernel_vmap_range(void *vaddr, int size) +{ +} +# 128 "./include/linux/highmem.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int nr_free_highpages(void) { return 0; } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *kmap_to_page(void *addr) +{ + return (((struct page *)vmemmap_base) + (__phys_addr((unsigned long)(addr)) >> 12)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long totalhigh_pages(void) { return 0UL; } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kmap(struct page *page) +{ + do { __might_sleep("include/linux/highmem.h", 139, 0); do { } while (0); } while (0); + return lowmem_page_address(page); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kunmap_high(struct page *page) +{ +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kunmap(struct page *page) +{ + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *kmap_atomic(struct page *page) +{ + do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); + pagefault_disable(); + return lowmem_page_address(page); +} + + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void kunmap_atomic_high(void *addr) +{ + + + + + + + +} +# 227 "./include/linux/highmem.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + void *addr = kmap_atomic(page); + clear_user_page(addr, vaddr, page); + do { do { extern void __compiletime_assert_1046(void) __attribute__((__error__("BUILD_BUG_ON failed: " "__same_type((addr), struct page *)"))); if (!(!(__builtin_types_compatible_p(typeof((addr)), typeof(struct page *))))) __compiletime_assert_1046(); } while (0); kunmap_atomic_high(addr); pagefault_enable(); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); +} +# 273 "./include/linux/highmem.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page * +alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr) +{ + return alloc_pages_vma((((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x100000u)) | (( gfp_t)0x02u)) | (( gfp_t)0x100u) | (( gfp_t)0x08u), 0, vma, vaddr, numa_node_id(), false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_highpage(struct page *page) +{ + void *kaddr = kmap_atomic(page); + clear_page(kaddr); + do { do { extern void __compiletime_assert_1047(void) __attribute__((__error__("BUILD_BUG_ON failed: " "__same_type((kaddr), struct page *)"))); if (!(!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) __compiletime_assert_1047(); } while (0); kunmap_atomic_high(kaddr); pagefault_enable(); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zero_user_segments(struct page *page, + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) +{ + void *kaddr = kmap_atomic(page); + + do { if (__builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1048)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (293), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1049)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + if (end1 > start1) + memset(kaddr + start1, 0, end1 - start1); + + if (end2 > start2) + memset(kaddr + start2, 0, end2 - start2); + + do { do { extern void __compiletime_assert_1050(void) __attribute__((__error__("BUILD_BUG_ON failed: " "__same_type((kaddr), struct page *)"))); if (!(!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) __compiletime_assert_1050(); } while (0); kunmap_atomic_high(kaddr); pagefault_enable(); do { __asm__ __volatile__("": : :"memory"); 
__preempt_count_sub(1); } while (0); } while (0); + flush_dcache_page(page); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zero_user_segment(struct page *page, + unsigned start, unsigned end) +{ + zero_user_segments(page, start, end, 0, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zero_user(struct page *page, + unsigned start, unsigned size) +{ + zero_user_segments(page, start, start + size, 0, 0); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from); + vto = kmap_atomic(to); + copy_user_page(vto, vfrom, vaddr, to); + do { do { extern void __compiletime_assert_1051(void) __attribute__((__error__("BUILD_BUG_ON failed: " "__same_type((vto), struct page *)"))); if (!(!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) __compiletime_assert_1051(); } while (0); kunmap_atomic_high(vto); pagefault_enable(); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); + do { do { extern void __compiletime_assert_1052(void) __attribute__((__error__("BUILD_BUG_ON failed: " "__same_type((vfrom), struct page *)"))); if (!(!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) __compiletime_assert_1052(); } while (0); kunmap_atomic_high(vfrom); pagefault_enable(); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void copy_highpage(struct page *to, struct page *from) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from); + vto = kmap_atomic(to); + copy_page(vto, vfrom); + do { do { extern void __compiletime_assert_1053(void) __attribute__((__error__("BUILD_BUG_ON failed: " "__same_type((vto), struct page *)"))); if (!(!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) __compiletime_assert_1053(); } while (0); kunmap_atomic_high(vto); pagefault_enable(); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); + do { do { extern void __compiletime_assert_1054(void) __attribute__((__error__("BUILD_BUG_ON failed: " "__same_type((vfrom), struct page *)"))); if (!(!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) __compiletime_assert_1054(); } while (0); kunmap_atomic_high(vfrom); pagefault_enable(); do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); +} +# 12 "./include/linux/pagemap.h" 2 + + + + + +# 1 "./include/linux/hugetlb_inline.h" 1 +# 9 "./include/linux/hugetlb_inline.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_vm_hugetlb_page(struct vm_area_struct *vma) +{ + return !!(vma->vm_flags & 0x00400000); +} +# 18 "./include/linux/pagemap.h" 2 + +struct pagevec; + + + + +enum mapping_flags { + AS_EIO = 0, + AS_ENOSPC = 1, + AS_MM_ALL_LOCKS = 2, + AS_UNEVICTABLE = 3, + AS_EXITING = 4, + + AS_NO_WRITEBACK_TAGS = 5, +}; +# 48 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
mapping_set_error(struct address_space *mapping, int error) +{ + if (__builtin_expect(!!(!error), 1)) + return; + + + __filemap_set_wb_err(mapping, error); + + + errseq_set(&mapping->host->i_sb->s_wb_err, error); + + + if (error == -28) + set_bit(AS_ENOSPC, &mapping->flags); + else + set_bit(AS_EIO, &mapping->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mapping_set_unevictable(struct address_space *mapping) +{ + set_bit(AS_UNEVICTABLE, &mapping->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mapping_clear_unevictable(struct address_space *mapping) +{ + clear_bit(AS_UNEVICTABLE, &mapping->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mapping_unevictable(struct address_space *mapping) +{ + return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mapping_set_exiting(struct address_space *mapping) +{ + set_bit(AS_EXITING, &mapping->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mapping_exiting(struct address_space *mapping) +{ + return test_bit(AS_EXITING, &mapping->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mapping_set_no_writeback_tags(struct address_space *mapping) +{ + set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mapping_use_writeback_tags(struct address_space *mapping) +{ + return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gfp_t mapping_gfp_mask(struct address_space * mapping) +{ + return mapping->gfp_mask; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gfp_t mapping_gfp_constraint(struct address_space *mapping, + gfp_t gfp_mask) +{ + return mapping_gfp_mask(mapping) & gfp_mask; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) +{ + m->gfp_mask = mask; +} + +void release_pages(struct page **pages, int nr); +# 168 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __page_cache_add_speculative(struct page *page, int count) +{ +# 187 "./include/linux/pagemap.h" + if (__builtin_expect(!!(!page_ref_add_unless(page, count, 0)), 0)) { + + + + + + return 0; + } + + do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1055)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" 
".popsection" : : "i" ("include/linux/pagemap.h"), "i" (196), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1056)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + + return 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_cache_get_speculative(struct page *page) +{ + return __page_cache_add_speculative(page, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_cache_add_speculative(struct page *page, int count) +{ + return __page_cache_add_speculative(page, count); +} +# 219 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void attach_page_private(struct page *page, void *data) +{ + get_page(page); + set_page_private(page, (unsigned long)data); + SetPagePrivate(page); +} +# 235 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *detach_page_private(struct page *page) +{ + void *data = (void *)((page)->private); + + if (!PagePrivate(page)) + return ((void *)0); + ClearPagePrivate(page); + set_page_private(page, 0); + put_page(page); + + return data; +} + + +extern struct page *__page_cache_alloc(gfp_t gfp); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *page_cache_alloc(struct address_space *x) +{ + return __page_cache_alloc(mapping_gfp_mask(x)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gfp_t readahead_gfp_mask(struct address_space *x) +{ + return mapping_gfp_mask(x) | (( gfp_t)0x10000u) | (( gfp_t)0x2000u); +} + +typedef int filler_t(void *, struct page *); + +unsigned long page_cache_next_miss(struct address_space *mapping, + unsigned long index, unsigned long max_scan); +unsigned long page_cache_prev_miss(struct address_space *mapping, + unsigned long index, unsigned long max_scan); +# 282 "./include/linux/pagemap.h" +struct page *pagecache_get_page(struct address_space *mapping, unsigned long offset, + int fgp_flags, gfp_t cache_gfp_mask); +# 295 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *find_get_page(struct address_space *mapping, + unsigned long offset) +{ + return pagecache_get_page(mapping, offset, 0, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *find_get_page_flags(struct address_space *mapping, + unsigned long offset, int fgp_flags) +{ + return pagecache_get_page(mapping, offset, fgp_flags, 0); +} +# 320 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *find_lock_page(struct address_space *mapping, + unsigned long offset) +{ + return pagecache_get_page(mapping, offset, 0x00000002, 0); +} +# 345 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *find_or_create_page(struct address_space 
*mapping, + unsigned long index, gfp_t gfp_mask) +{ + return pagecache_get_page(mapping, index, + 0x00000002|0x00000001|0x00000004, + gfp_mask); +} +# 366 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *grab_cache_page_nowait(struct address_space *mapping, + unsigned long index) +{ + return pagecache_get_page(mapping, index, + 0x00000002|0x00000004|0x00000010|0x00000020, + mapping_gfp_mask(mapping)); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *find_subpage(struct page *head, unsigned long index) +{ + + if (PageHuge(head)) + return head; + + return head + (index & (hpage_nr_pages(head) - 1)); +} + +struct page *find_get_entry(struct address_space *mapping, unsigned long offset); +struct page *find_lock_entry(struct address_space *mapping, unsigned long offset); +unsigned find_get_entries(struct address_space *mapping, unsigned long start, + unsigned int nr_entries, struct page **entries, + unsigned long *indices); +unsigned find_get_pages_range(struct address_space *mapping, unsigned long *start, + unsigned long end, unsigned int nr_pages, + struct page **pages); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned find_get_pages(struct address_space *mapping, + unsigned long *start, unsigned int nr_pages, + struct page **pages) +{ + return find_get_pages_range(mapping, start, (unsigned long)-1, nr_pages, + pages); +} +unsigned find_get_pages_contig(struct address_space *mapping, unsigned long start, + unsigned int nr_pages, struct page **pages); +unsigned find_get_pages_range_tag(struct address_space *mapping, unsigned long *index, + unsigned long end, xa_mark_t tag, unsigned int nr_pages, + struct page **pages); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned find_get_pages_tag(struct address_space *mapping, + unsigned long *index, xa_mark_t tag, unsigned int nr_pages, + struct page **pages) +{ + return find_get_pages_range_tag(mapping, index, (unsigned long)-1, tag, + nr_pages, pages); +} + +struct page *grab_cache_page_write_begin(struct address_space *mapping, + unsigned long index, unsigned flags); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *grab_cache_page(struct address_space *mapping, + unsigned long index) +{ + return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); +} + +extern struct page * read_cache_page(struct address_space *mapping, + unsigned long index, filler_t *filler, void *data); +extern struct page * read_cache_page_gfp(struct address_space *mapping, + unsigned long index, gfp_t gfp_mask); +extern int read_cache_pages(struct address_space *mapping, + struct list_head *pages, filler_t *filler, void *data); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *read_mapping_page(struct address_space *mapping, + unsigned long index, void *data) +{ + return read_cache_page(mapping, index, ((void *)0), data); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long page_to_index(struct page *page) +{ + unsigned long pgoff; + + if 
(__builtin_expect(!!(!PageTransTail(page)), 1)) + return page->index; + + + + + + pgoff = compound_head(page)->index; + pgoff += page - compound_head(page); + return pgoff; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long page_to_pgoff(struct page *page) +{ + if (__builtin_expect(!!(PageHeadHuge(page)), 0)) + return page->index << compound_order(page); + + return page_to_index(page); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) loff_t page_offset(struct page *page) +{ + return ((loff_t)page->index) << 12; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) loff_t page_file_offset(struct page *page) +{ + return ((loff_t)page_index(page)) << 12; +} + +extern unsigned long linear_hugepage_index(struct vm_area_struct *vma, + unsigned long address); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long linear_page_index(struct vm_area_struct *vma, + unsigned long address) +{ + unsigned long pgoff; + if (__builtin_expect(!!(is_vm_hugetlb_page(vma)), 0)) + return linear_hugepage_index(vma, address); + pgoff = (address - vma->vm_start) >> 12; + pgoff += vma->vm_pgoff; + return pgoff; +} + + +struct wait_page_key { + struct page *page; + int bit_nr; + int page_match; +}; + +struct wait_page_queue { + struct page *page; + int bit_nr; + wait_queue_entry_t wait; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int wake_page_match(struct wait_page_queue *wait_page, + struct wait_page_key *key) +{ + if (wait_page->page != key->page) + return 0; + key->page_match = 1; + + if (wait_page->bit_nr != key->bit_nr) + return 0; +# 530 "./include/linux/pagemap.h" + if (test_bit(key->bit_nr, &key->page->flags)) + return -1; + + return 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int kiocb_wait_page_queue_init(struct kiocb *kiocb, + struct wait_page_queue *wait, + wait_queue_func_t func, + void *data) +{ + + if (kiocb->ki_flags & (1 << 3)) + return -22; + if (kiocb->ki_filp->f_mode & (( fmode_t)0x40000000)) { + wait->wait.func = func; + wait->wait.private = data; + wait->wait.flags = 0; + INIT_LIST_HEAD(&wait->wait.entry); + kiocb->ki_flags |= (1 << 8); + kiocb->ki_waitq = wait; + return 0; + } + + return -95; +} + +extern void __lock_page(struct page *page); +extern int __lock_page_killable(struct page *page); +extern int __lock_page_async(struct page *page, struct wait_page_queue *wait); +extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags); +extern void unlock_page(struct page *page); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int trylock_page(struct page *page) +{ + page = compound_head(page); + return (__builtin_expect(!!(!test_and_set_bit_lock(PG_locked, &page->flags)), 1)); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void lock_page(struct page *page) +{ + do { __might_sleep("include/linux/pagemap.h", 578, 0); do { } while (0); } while (0); + if (!trylock_page(page)) + __lock_page(page); +} + + + + + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) int lock_page_killable(struct page *page) +{ + do { __might_sleep("include/linux/pagemap.h", 590, 0); do { } while (0); } while (0); + if (!trylock_page(page)) + return __lock_page_killable(page); + return 0; +} +# 604 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int lock_page_async(struct page *page, + struct wait_page_queue *wait) +{ + if (!trylock_page(page)) + return __lock_page_async(page, wait); + return 0; +} +# 619 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags) +{ + do { __might_sleep("include/linux/pagemap.h", 622, 0); do { } while (0); } while (0); + return trylock_page(page) || __lock_page_or_retry(page, mm, flags); +} + + + + + +extern void wait_on_page_bit(struct page *page, int bit_nr); +extern int wait_on_page_bit_killable(struct page *page, int bit_nr); +# 640 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wait_on_page_locked(struct page *page) +{ + if (PageLocked(page)) + wait_on_page_bit(compound_head(page), PG_locked); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int wait_on_page_locked_killable(struct page *page) +{ + if (!PageLocked(page)) + return 0; + return wait_on_page_bit_killable(compound_head(page), PG_locked); +} + +extern void put_and_wait_on_page_locked(struct page *page); + +void wait_on_page_writeback(struct page *page); +extern void end_page_writeback(struct page *page); +void wait_for_stable_page(struct page *page); + +void page_endio(struct page *page, bool is_write, int err); + + + + +extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fault_in_pages_writeable(char *uaddr, int size) +{ + char *end = uaddr + size - 1; + + if (__builtin_expect(!!(size == 0), 0)) + return 0; + + if (__builtin_expect(!!(uaddr > end), 0)) + return -14; + + + + + do { + if (__builtin_expect(!!(({ __label__ __pu_label; int __pu_err = -14; __typeof__(*((uaddr))) __pu_val = ((__typeof__(*(uaddr)))(0)); __typeof__((uaddr)) __pu_ptr = ((uaddr)); __typeof__(sizeof(*(uaddr))) __pu_size = (sizeof(*(uaddr))); stac(); do { (void)0; switch (__pu_size) { case 1: do { asm goto("\n" "1: mov""b"" %0,%1\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "%l2" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : : "iq"(__pu_val), "m" ((*(struct __large_struct *)(__pu_ptr))) : : __pu_label); asm (""); } while (0); break; case 2: do { asm goto("\n" "1: mov""w"" %0,%1\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "%l2" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : : "ir"(__pu_val), "m" ((*(struct __large_struct *)(__pu_ptr))) : : __pu_label); asm (""); } while (0); break; case 4: do { asm goto("\n" "1: mov""l"" %0,%1\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "%l2" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : : 
"ir"(__pu_val), "m" ((*(struct __large_struct *)(__pu_ptr))) : : __pu_label); asm (""); } while (0); break; case 8: do { asm goto("\n" "1: mov""q"" %0,%1\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "%l2" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : : "er"(__pu_val), "m" ((*(struct __large_struct *)(__pu_ptr))) : : __pu_label); asm (""); } while (0); break; default: __put_user_bad(); } } while (0); __pu_err = 0; __pu_label: clac(); __builtin_expect(__pu_err, 0); }) != 0), 0)) + return -14; + uaddr += ((1UL) << 12); + } while (uaddr <= end); + + + if (((unsigned long)uaddr & (~(((1UL) << 12)-1))) == + ((unsigned long)end & (~(((1UL) << 12)-1)))) + return ({ __label__ __pu_label; int __pu_err = -14; __typeof__(*((end))) __pu_val = ((__typeof__(*(end)))(0)); __typeof__((end)) __pu_ptr = ((end)); __typeof__(sizeof(*(end))) __pu_size = (sizeof(*(end))); stac(); do { (void)0; switch (__pu_size) { case 1: do { asm goto("\n" "1: mov""b"" %0,%1\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "%l2" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : : "iq"(__pu_val), "m" ((*(struct __large_struct *)(__pu_ptr))) : : __pu_label); asm (""); } while (0); break; case 2: do { asm goto("\n" "1: mov""w"" %0,%1\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "%l2" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : : "ir"(__pu_val), "m" ((*(struct __large_struct *)(__pu_ptr))) : : __pu_label); asm (""); } while (0); break; case 4: do { asm goto("\n" "1: mov""l"" %0,%1\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "%l2" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : : "ir"(__pu_val), "m" ((*(struct __large_struct *)(__pu_ptr))) : : __pu_label); asm (""); } while (0); break; case 8: do { asm goto("\n" "1: mov""q"" %0,%1\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "%l2" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : : "er"(__pu_val), "m" ((*(struct __large_struct *)(__pu_ptr))) : : __pu_label); asm (""); } while (0); break; default: __put_user_bad(); } } while (0); __pu_err = 0; __pu_label: clac(); __builtin_expect(__pu_err, 0); }); + + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fault_in_pages_readable(const char *uaddr, int size) +{ + volatile char c; + const char *end = uaddr + size - 1; + + if (__builtin_expect(!!(size == 0), 0)) + return 0; + + if (__builtin_expect(!!(uaddr > end), 0)) + return -14; + + do { + if (__builtin_expect(!!(({ int __gu_err; __typeof__( __builtin_choose_expr(sizeof(*((uaddr)))<=sizeof(char),(unsigned char)0,__builtin_choose_expr(sizeof(*((uaddr)))<=sizeof(short),(unsigned short)0,__builtin_choose_expr(sizeof(*((uaddr)))<=sizeof(int),(unsigned int)0,__builtin_choose_expr(sizeof(*((uaddr)))<=sizeof(long),(unsigned long)0,0ULL))))) __gu_val; __typeof__((uaddr)) __gu_ptr = ((uaddr)); __typeof__(sizeof(*(uaddr))) __gu_size = (sizeof(*(uaddr))); ({ stac(); asm volatile ("# ALT: oldnstr\n" "661:\n\t" "" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" 
" .word " "( 3*32+18)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "lfence" "\n" "665""1" ":\n" ".popsection\n" : : : "memory"); }); do { __gu_err = 0; (void)0; switch (__gu_size) { case 1: asm volatile("\n" "1: mov""b"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""b"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=q"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 2: asm volatile("\n" "1: mov""w"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""w"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 4: asm volatile("\n" "1: mov""l"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""l"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 8: asm volatile("\n" "1: mov""q"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""q"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; default: (__gu_val) = __get_user_bad(); } } while (0); clac(); ((c)) = ( __typeof__(*((uaddr))))__gu_val; __builtin_expect(__gu_err, 0); }) != 0), 0)) + return -14; + uaddr += ((1UL) << 12); + } while (uaddr <= end); + + + if (((unsigned long)uaddr & (~(((1UL) << 12)-1))) == + ((unsigned long)end & (~(((1UL) << 12)-1)))) { + return ({ int __gu_err; __typeof__( __builtin_choose_expr(sizeof(*((end)))<=sizeof(char),(unsigned char)0,__builtin_choose_expr(sizeof(*((end)))<=sizeof(short),(unsigned short)0,__builtin_choose_expr(sizeof(*((end)))<=sizeof(int),(unsigned int)0,__builtin_choose_expr(sizeof(*((end)))<=sizeof(long),(unsigned long)0,0ULL))))) __gu_val; __typeof__((end)) __gu_ptr = ((end)); __typeof__(sizeof(*(end))) __gu_size = (sizeof(*(end))); ({ stac(); asm volatile ("# ALT: oldnstr\n" "661:\n\t" "" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 3*32+18)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" 
".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "lfence" "\n" "665""1" ":\n" ".popsection\n" : : : "memory"); }); do { __gu_err = 0; (void)0; switch (__gu_size) { case 1: asm volatile("\n" "1: mov""b"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""b"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=q"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 2: asm volatile("\n" "1: mov""w"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""w"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 4: asm volatile("\n" "1: mov""l"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""l"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 8: asm volatile("\n" "1: mov""q"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""q"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; default: (__gu_val) = __get_user_bad(); } } while (0); clac(); ((c)) = ( __typeof__(*((end))))__gu_val; __builtin_expect(__gu_err, 0); }); + } + + (void)c; + return 0; +} + +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + unsigned long index, gfp_t gfp_mask); +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + unsigned long index, gfp_t gfp_mask); +extern void delete_from_page_cache(struct page *page); +extern void __delete_from_page_cache(struct page *page, void *shadow); +int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); +void delete_from_page_cache_batch(struct address_space *mapping, + struct pagevec *pvec); + + + +void page_cache_sync_readahead(struct address_space *, struct file_ra_state *, + struct file *, unsigned long index, unsigned long req_count); +void page_cache_async_readahead(struct address_space *, struct file_ra_state *, + struct file *, struct page *, unsigned long index, + unsigned long req_count); +void page_cache_readahead_unbounded(struct address_space *, struct file *, + unsigned long index, unsigned long nr_to_read, + unsigned long lookahead_count); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int 
add_to_page_cache(struct page *page, + struct address_space *mapping, unsigned long offset, gfp_t gfp_mask) +{ + int error; + + __SetPageLocked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (__builtin_expect(!!(error), 0)) + __ClearPageLocked(page); + return error; +} +# 775 "./include/linux/pagemap.h" +struct readahead_control { + struct file *file; + struct address_space *mapping; + + unsigned long _index; + unsigned int _nr_pages; + unsigned int _batch_count; +}; +# 793 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *readahead_page(struct readahead_control *rac) +{ + struct page *page; + + do { if (__builtin_expect(!!(rac->_batch_count > rac->_nr_pages), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1057)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/pagemap.h"), "i" (797), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1058)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + rac->_nr_pages -= rac->_batch_count; + rac->_index += rac->_batch_count; + + if (!rac->_nr_pages) { + rac->_batch_count = 0; + return ((void *)0); + } + + page = xa_load(&rac->mapping->i_pages, rac->_index); + do { if (__builtin_expect(!!(!PageLocked(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageLocked(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1059)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/pagemap.h"), "i" (807), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1060)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + rac->_batch_count = hpage_nr_pages(page); + + return page; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __readahead_batch(struct readahead_control *rac, + struct page **array, unsigned int array_sz) +{ + unsigned int i = 0; + struct xa_state xas = { .xa = &rac->mapping->i_pages, .xa_index = 0, .xa_shift = 0, .xa_sibs = 0, .xa_offset = 0, .xa_pad = 0, .xa_node = ((struct xa_node *)3UL), .xa_alloc = ((void *)0), .xa_update = ((void *)0) }; + struct page *page; + + do { if (__builtin_expect(!!(rac->_batch_count > rac->_nr_pages), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1061)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" 
".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/pagemap.h"), "i" (820), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1062)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + rac->_nr_pages -= rac->_batch_count; + rac->_index += rac->_batch_count; + rac->_batch_count = 0; + + xas_set(&xas, rac->_index); + rcu_read_lock(); + for (page = xas_find(&xas, rac->_index + rac->_nr_pages - 1); page; page = xas_next_entry(&xas, rac->_index + rac->_nr_pages - 1)) { + do { if (__builtin_expect(!!(!PageLocked(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageLocked(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1063)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/pagemap.h"), "i" (828), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1064)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + do { if (__builtin_expect(!!(PageTail(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "PageTail(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1065)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/pagemap.h"), "i" (829), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1066)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + array[i++] = page; + rac->_batch_count += hpage_nr_pages(page); + + + + + + + + if (PageHead(page)) + xas_set(&xas, rac->_index + rac->_batch_count); + + if (i == array_sz) + break; + } + rcu_read_unlock(); + + return i; +} +# 868 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) loff_t readahead_pos(struct readahead_control *rac) +{ + return (loff_t)rac->_index * ((1UL) << 12); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) loff_t readahead_length(struct readahead_control *rac) +{ + return (loff_t)rac->_nr_pages * ((1UL) << 12); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long readahead_index(struct readahead_control *rac) +{ + return rac->_index; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
unsigned int readahead_count(struct readahead_control *rac) +{ + return rac->_nr_pages; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long dir_pages(struct inode *inode) +{ + return (unsigned long)(inode->i_size + ((1UL) << 12) - 1) >> + 12; +} +# 914 "./include/linux/pagemap.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int page_mkwrite_check_truncate(struct page *page, + struct inode *inode) +{ + loff_t size = i_size_read(inode); + unsigned long index = size >> 12; + int offset = ((unsigned long)(size) & ~(~(((1UL) << 12)-1))); + + if (page->mapping != inode->i_mapping) + return -14; + + + if (page->index < index) + return ((1UL) << 12); + + if (page->index > index || !offset) + return -14; + + return offset; +} +# 17 "./include/linux/blkdev.h" 2 +# 1 "./include/linux/backing-dev-defs.h" 1 +# 11 "./include/linux/backing-dev-defs.h" +# 1 "./include/linux/flex_proportions.h" 1 +# 28 "./include/linux/flex_proportions.h" +struct fprop_global { + + struct percpu_counter events; + + unsigned int period; + + seqcount_t sequence; +}; + +int fprop_global_init(struct fprop_global *p, gfp_t gfp); +void fprop_global_destroy(struct fprop_global *p); +bool fprop_new_period(struct fprop_global *p, int periods); + + + + +struct fprop_local_single { + + unsigned long events; + + unsigned int period; + raw_spinlock_t lock; +}; + + + + + +int fprop_local_init_single(struct fprop_local_single *pl); +void fprop_local_destroy_single(struct fprop_local_single *pl); +void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl); +void fprop_fraction_single(struct fprop_global *p, + struct fprop_local_single *pl, unsigned long *numerator, + unsigned long *denominator); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + __fprop_inc_single(p, pl); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} + + + + +struct fprop_local_percpu { + + struct percpu_counter events; + + unsigned int period; + raw_spinlock_t lock; +}; + +int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp); +void fprop_local_destroy_percpu(struct fprop_local_percpu *pl); +void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl); +void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl, + int max_frac); +void fprop_fraction_percpu(struct fprop_global *p, + struct fprop_local_percpu *pl, unsigned long *numerator, + unsigned long *denominator); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl) +{ + 
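/* The two expanded do/while blocks below are the local_irq_save(flags) and local_irq_restore(flags) macros: __fprop_inc_percpu() must run with interrupts disabled. */ +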
unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + __fprop_inc_percpu(p, pl); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} +# 12 "./include/linux/backing-dev-defs.h" 2 + + + + + +struct page; +struct device; +struct dentry; + + + + +enum wb_state { + WB_registered, + WB_writeback_running, + WB_has_dirty_io, + WB_start_all, +}; + +enum wb_congested_state { + WB_async_congested, + WB_sync_congested, +}; + +typedef int (congested_fn)(void *, int); + +enum wb_stat_item { + WB_RECLAIMABLE, + WB_WRITEBACK, + WB_DIRTIED, + WB_WRITTEN, + NR_WB_STAT_ITEMS +}; + + + + + + +enum wb_reason { + WB_REASON_BACKGROUND, + WB_REASON_VMSCAN, + WB_REASON_SYNC, + WB_REASON_PERIODIC, + WB_REASON_LAPTOP_TIMER, + WB_REASON_FS_FREE_SPACE, + + + + + + + WB_REASON_FORKER_THREAD, + WB_REASON_FOREIGN_FLUSH, + + WB_REASON_MAX, +}; + +struct wb_completion { + atomic_t cnt; + wait_queue_head_t *waitq; +}; +# 97 "./include/linux/backing-dev-defs.h" +struct bdi_writeback_congested { + unsigned long state; + refcount_t refcnt; + + + struct backing_dev_info *__bdi; + + + int blkcg_id; + struct rb_node rb_node; + +}; +# 129 "./include/linux/backing-dev-defs.h" +struct bdi_writeback { + struct backing_dev_info *bdi; + + unsigned long state; + unsigned long last_old_flush; + + struct list_head b_dirty; + struct list_head b_io; + struct list_head b_more_io; + struct list_head b_dirty_time; + spinlock_t list_lock; + + struct percpu_counter stat[NR_WB_STAT_ITEMS]; + + struct bdi_writeback_congested *congested; + + unsigned long bw_time_stamp; + unsigned long dirtied_stamp; + unsigned long written_stamp; + unsigned long write_bandwidth; + unsigned long avg_write_bandwidth; + + + + + + + + unsigned long dirty_ratelimit; + unsigned long balanced_dirty_ratelimit; + + struct fprop_local_percpu completions; + int dirty_exceeded; + enum wb_reason start_all_reason; + + spinlock_t work_lock; + struct list_head work_list; + struct delayed_work dwork; + + unsigned long dirty_sleep; + + struct list_head bdi_node; + + + struct percpu_ref refcnt; + struct fprop_local_percpu memcg_completions; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; + struct list_head memcg_node; + struct list_head blkcg_node; + + union { + struct work_struct release_work; + struct callback_head rcu; + }; + +}; + +struct backing_dev_info { + u64 id; + struct rb_node rb_node; + struct list_head bdi_list; + unsigned long ra_pages; + unsigned long io_pages; + congested_fn *congested_fn; + void *congested_data; + + struct kref refcnt; + unsigned int capabilities; + unsigned int min_ratio; + unsigned int max_ratio, max_prop_frac; + + + + + + atomic_long_t tot_write_bandwidth; + + struct bdi_writeback wb; + struct list_head wb_list; + + struct xarray cgwb_tree; + struct rb_root cgwb_congested_tree; + struct mutex cgwb_release_mutex; + struct rw_semaphore wb_switch_rwsem; + + + + wait_queue_head_t wb_waitq; + + struct device *dev; + char dev_name[64]; + struct 
device *owner; + + struct timer_list laptop_mode_wb_timer; + + + struct dentry *debug_dir; + +}; + +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; + +void clear_wb_congested(struct bdi_writeback_congested *congested, int sync); +void set_wb_congested(struct bdi_writeback_congested *congested, int sync); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void clear_bdi_congested(struct backing_dev_info *bdi, int sync) +{ + clear_wb_congested(bdi->wb.congested, sync); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_bdi_congested(struct backing_dev_info *bdi, int sync) +{ + set_wb_congested(bdi->wb.congested, sync); +} + +struct wb_lock_cookie { + bool locked; + unsigned long flags; +}; + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool wb_tryget(struct bdi_writeback *wb) +{ + if (wb != &wb->bdi->wb) + return percpu_ref_tryget(&wb->refcnt); + return true; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wb_get(struct bdi_writeback *wb) +{ + if (wb != &wb->bdi->wb) + percpu_ref_get(&wb->refcnt); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wb_put(struct bdi_writeback *wb) +{ + if (({ int __ret_warn_on = !!(!wb->bdi); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1067)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/backing-dev-defs.h"), "i" (282), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1068)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1069)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) { + + + + + return; + } + + if (wb != &wb->bdi->wb) + percpu_ref_put(&wb->refcnt); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool wb_dying(struct bdi_writeback *wb) +{ + return percpu_ref_is_dying(&wb->refcnt); +} +# 18 "./include/linux/blkdev.h" 2 + +# 1 "./include/linux/mempool.h" 1 +# 11 "./include/linux/mempool.h" +struct kmem_cache; + +typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); +typedef void (mempool_free_t)(void *element, void *pool_data); + +typedef struct mempool_s { + spinlock_t lock; + int min_nr; + int curr_nr; + void **elements; + + void *pool_data; + mempool_alloc_t *alloc; + mempool_free_t *free; + wait_queue_head_t wait; +} mempool_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mempool_initialized(mempool_t *pool) +{ + return pool->elements != ((void *)0); +} + +void mempool_exit(mempool_t *pool); +int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t 
*alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id); +int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); + +extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data); +extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int nid); + +extern int mempool_resize(mempool_t *pool, int new_min_nr); +extern void mempool_destroy(mempool_t *pool); +extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __attribute__((__malloc__)); +extern void mempool_free(void *element, mempool_t *pool); + + + + + + +void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); +void mempool_free_slab(void *element, void *pool_data); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc) +{ + return mempool_init(pool, min_nr, mempool_alloc_slab, + mempool_free_slab, (void *) kc); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) mempool_t * +mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) +{ + return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, + (void *) kc); +} + + + + + +void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); +void mempool_kfree(void *element, void *pool_data); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size) +{ + return mempool_init(pool, min_nr, mempool_kmalloc, + mempool_kfree, (void *) size); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) +{ + return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, + (void *) size); +} + + + + + +void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); +void mempool_free_pages(void *element, void *pool_data); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) +{ + return mempool_init(pool, min_nr, mempool_alloc_pages, + mempool_free_pages, (void *)(long)order); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) mempool_t *mempool_create_page_pool(int min_nr, int order) +{ + return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, + (void *)(long)order); +} +# 20 "./include/linux/blkdev.h" 2 + +# 1 "./include/linux/bio.h" 1 +# 61 "./include/linux/bio.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_has_data(struct bio *bio) +{ + if (bio && + bio->bi_iter.bi_size && + ((bio)->bi_opf & ((1 << 8) - 1)) != REQ_OP_DISCARD && + ((bio)->bi_opf & ((1 << 8) - 1)) != REQ_OP_SECURE_ERASE && + ((bio)->bi_opf & ((1 << 8) - 1)) != REQ_OP_WRITE_ZEROES) + return true; + + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_no_advance_iter(const struct bio *bio) +{ + return ((bio)->bi_opf & ((1 << 8) - 1)) == REQ_OP_DISCARD || + 
((bio)->bi_opf & ((1 << 8) - 1)) == REQ_OP_SECURE_ERASE || + ((bio)->bi_opf & ((1 << 8) - 1)) == REQ_OP_WRITE_SAME || + ((bio)->bi_opf & ((1 << 8) - 1)) == REQ_OP_WRITE_ZEROES; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_mergeable(struct bio *bio) +{ + if (bio->bi_opf & ((1ULL << __REQ_NOMERGE) | (1ULL << __REQ_PREFLUSH) | (1ULL << __REQ_FUA))) + return false; + + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int bio_cur_bytes(struct bio *bio) +{ + if (bio_has_data(bio)) + return ((struct bio_vec) { .bv_page = (((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_page) + (((&(((((((bio))->bi_io_vec)))))[(((((((bio)->bi_iter)))))).bi_idx])->bv_offset + ((((((bio)->bi_iter))))).bi_bvec_done) / ((1UL) << 12))), .bv_len = __builtin_choose_expr(((!!(sizeof((typeof((unsigned)(__builtin_choose_expr(((!!(sizeof((typeof((((((bio)->bi_iter)))).bi_size) *)1 == (typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((((((bio)->bi_iter)))).bi_size) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) * 0l)) : (int *)8))))), (((((((bio)->bi_iter)))).bi_size) < ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) ? ((((((bio)->bi_iter)))).bi_size) : ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done)), ({ typeof((((((bio)->bi_iter)))).bi_size) __UNIQUE_ID___x1070 = ((((((bio)->bi_iter)))).bi_size); typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) __UNIQUE_ID___y1071 = ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done); ((__UNIQUE_ID___x1070) < (__UNIQUE_ID___y1071) ? (__UNIQUE_ID___x1070) : (__UNIQUE_ID___y1071)); })))) *)1 == (typeof((unsigned)(((1UL) << 12) - (((&(((((((bio))->bi_io_vec)))))[(((((((bio)->bi_iter)))))).bi_idx])->bv_offset + ((((((bio)->bi_iter))))).bi_bvec_done) % ((1UL) << 12)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned)(__builtin_choose_expr(((!!(sizeof((typeof((((((bio)->bi_iter)))).bi_size) *)1 == (typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((((((bio)->bi_iter)))).bi_size) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) * 0l)) : (int *)8))))), (((((((bio)->bi_iter)))).bi_size) < ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) ? 
((((((bio)->bi_iter)))).bi_size) : ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done)), ({ typeof((((((bio)->bi_iter)))).bi_size) __UNIQUE_ID___x1070 = ((((((bio)->bi_iter)))).bi_size); typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) __UNIQUE_ID___y1071 = ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done); ((__UNIQUE_ID___x1070) < (__UNIQUE_ID___y1071) ? (__UNIQUE_ID___x1070) : (__UNIQUE_ID___y1071)); })))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned)(((1UL) << 12) - (((&(((((((bio))->bi_io_vec)))))[(((((((bio)->bi_iter)))))).bi_idx])->bv_offset + ((((((bio)->bi_iter))))).bi_bvec_done) % ((1UL) << 12)))) * 0l)) : (int *)8))))), (((unsigned)(__builtin_choose_expr(((!!(sizeof((typeof((((((bio)->bi_iter)))).bi_size) *)1 == (typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((((((bio)->bi_iter)))).bi_size) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) * 0l)) : (int *)8))))), (((((((bio)->bi_iter)))).bi_size) < ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) ? ((((((bio)->bi_iter)))).bi_size) : ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done)), ({ typeof((((((bio)->bi_iter)))).bi_size) __UNIQUE_ID___x1070 = ((((((bio)->bi_iter)))).bi_size); typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) __UNIQUE_ID___y1071 = ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done); ((__UNIQUE_ID___x1070) < (__UNIQUE_ID___y1071) ? (__UNIQUE_ID___x1070) : (__UNIQUE_ID___y1071)); })))) < ((unsigned)(((1UL) << 12) - (((&(((((((bio))->bi_io_vec)))))[(((((((bio)->bi_iter)))))).bi_idx])->bv_offset + ((((((bio)->bi_iter))))).bi_bvec_done) % ((1UL) << 12)))) ? ((unsigned)(__builtin_choose_expr(((!!(sizeof((typeof((((((bio)->bi_iter)))).bi_size) *)1 == (typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((((((bio)->bi_iter)))).bi_size) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) * 0l)) : (int *)8))))), (((((((bio)->bi_iter)))).bi_size) < ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) ? ((((((bio)->bi_iter)))).bi_size) : ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done)), ({ typeof((((((bio)->bi_iter)))).bi_size) __UNIQUE_ID___x1070 = ((((((bio)->bi_iter)))).bi_size); typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) __UNIQUE_ID___y1071 = ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done); ((__UNIQUE_ID___x1070) < (__UNIQUE_ID___y1071) ? 
(__UNIQUE_ID___x1070) : (__UNIQUE_ID___y1071)); })))) : ((unsigned)(((1UL) << 12) - (((&(((((((bio))->bi_io_vec)))))[(((((((bio)->bi_iter)))))).bi_idx])->bv_offset + ((((((bio)->bi_iter))))).bi_bvec_done) % ((1UL) << 12))))), ({ typeof((unsigned)(__builtin_choose_expr(((!!(sizeof((typeof((((((bio)->bi_iter)))).bi_size) *)1 == (typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((((((bio)->bi_iter)))).bi_size) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) * 0l)) : (int *)8))))), (((((((bio)->bi_iter)))).bi_size) < ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) ? ((((((bio)->bi_iter)))).bi_size) : ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done)), ({ typeof((((((bio)->bi_iter)))).bi_size) __UNIQUE_ID___x1070 = ((((((bio)->bi_iter)))).bi_size); typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) __UNIQUE_ID___y1071 = ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done); ((__UNIQUE_ID___x1070) < (__UNIQUE_ID___y1071) ? (__UNIQUE_ID___x1070) : (__UNIQUE_ID___y1071)); })))) __UNIQUE_ID___x1072 = ((unsigned)(__builtin_choose_expr(((!!(sizeof((typeof((((((bio)->bi_iter)))).bi_size) *)1 == (typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((((((bio)->bi_iter)))).bi_size) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) * 0l)) : (int *)8))))), (((((((bio)->bi_iter)))).bi_size) < ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) ? ((((((bio)->bi_iter)))).bi_size) : ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done)), ({ typeof((((((bio)->bi_iter)))).bi_size) __UNIQUE_ID___x1070 = ((((((bio)->bi_iter)))).bi_size); typeof((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done) __UNIQUE_ID___y1071 = ((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_len - (((((bio)->bi_iter)))).bi_bvec_done); ((__UNIQUE_ID___x1070) < (__UNIQUE_ID___y1071) ? (__UNIQUE_ID___x1070) : (__UNIQUE_ID___y1071)); })))); typeof((unsigned)(((1UL) << 12) - (((&(((((((bio))->bi_io_vec)))))[(((((((bio)->bi_iter)))))).bi_idx])->bv_offset + ((((((bio)->bi_iter))))).bi_bvec_done) % ((1UL) << 12)))) __UNIQUE_ID___y1073 = ((unsigned)(((1UL) << 12) - (((&(((((((bio))->bi_io_vec)))))[(((((((bio)->bi_iter)))))).bi_idx])->bv_offset + ((((((bio)->bi_iter))))).bi_bvec_done) % ((1UL) << 12)))); ((__UNIQUE_ID___x1072) < (__UNIQUE_ID___y1073) ? 
(__UNIQUE_ID___x1072) : (__UNIQUE_ID___y1073)); })), .bv_offset = (((&((((((bio))->bi_io_vec))))[((((((bio)->bi_iter))))).bi_idx])->bv_offset + (((((bio)->bi_iter)))).bi_bvec_done) % ((1UL) << 12)), }).bv_len;
+ else
+ return bio->bi_iter.bi_size;
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *bio_data(struct bio *bio)
+{
+ if (bio_has_data(bio))
+ return lowmem_page_address(bio_page(bio)) + bio_offset(bio);
+
+ return ((void *)0);
+}
+# 113 "./include/linux/bio.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_full(struct bio *bio, unsigned len)
+{
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
+ return true;
+
+ if (bio->bi_iter.bi_size > (~0U) - len)
+ return true;
+
+ return false;
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_next_segment(const struct bio *bio,
+ struct bvec_iter_all *iter)
+{
+ if (iter->idx >= bio->bi_vcnt)
+ return false;
+
+ bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+ return true;
+}
+# 141 "./include/linux/bio.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_advance_iter(const struct bio *bio,
+ struct bvec_iter *iter, unsigned int bytes)
+{
+ iter->bi_sector += bytes >> 9;
+
+ if (bio_no_advance_iter(bio))
+ iter->bi_size -= bytes;
+ else
+ bvec_iter_advance(bio->bi_io_vec, iter, bytes);
+
+}
+# 182 "./include/linux/bio.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned bio_segments(struct bio *bio)
+{
+ unsigned segs = 0;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ switch (((bio)->bi_opf & ((1 << 8) - 1))) {
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ return 0;
+ case REQ_OP_WRITE_SAME:
+ return 1;
+ default:
+ break;
+ }
+
+ for (iter = ((bio)->bi_iter); (iter).bi_size && ((bv = bio_iter_iovec((bio), (iter))), 1); bio_advance_iter((bio), &(iter), (bv).bv_len))
+ segs++;
+
+ return segs;
+}
+# 224 "./include/linux/bio.h"
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_get(struct bio *bio)
+{
+ bio->bi_flags |= (1 << BIO_REFFED);
+ do { } while (0);
+ atomic_inc(&bio->__bi_cnt);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_cnt_set(struct bio *bio, unsigned int count)
+{
+ if (count != 1) {
+ bio->bi_flags |= (1 << BIO_REFFED);
+ asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc");
+ }
+ atomic_set(&bio->__bi_cnt, count);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_flagged(struct bio *bio, unsigned int bit)
+{
+ return (bio->bi_flags & (1U << bit)) != 0;
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_set_flag(struct bio *bio, unsigned int bit)
+{
+ bio->bi_flags |= (1U << bit);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_clear_flag(struct bio *bio, unsigned int bit)
+{
+ bio->bi_flags &= ~(1U << bit);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ *bv = bio_iovec(bio);
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+{
+ struct bvec_iter iter = bio->bi_iter;
+ int idx;
+
+ if (__builtin_expect(!!(!((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)), 0)) {
+ *bv = bio_iovec(bio);
+ return;
+ }
+
+ bio_advance_iter(bio, &iter, iter.bi_size);
+
+ if (!iter.bi_bvec_done)
+ idx = iter.bi_idx - 1;
+ else
+ idx = iter.bi_idx;
+
+ *bv = bio->bi_io_vec[idx];
+
+ if (iter.bi_bvec_done)
+ bv->bv_len = iter.bi_bvec_done;
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio_vec *bio_first_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_io_vec;
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *bio_first_page_all(struct bio *bio)
+{
+ return bio_first_bvec_all(bio)->bv_page;
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio_vec *bio_last_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return &bio->bi_io_vec[bio->bi_vcnt - 1];
+}
+
+enum bip_flags {
+ BIP_BLOCK_INTEGRITY = 1
<< 0, + BIP_MAPPED_INTEGRITY = 1 << 1, + BIP_CTRL_NOCHECK = 1 << 2, + BIP_DISK_NOCHECK = 1 << 3, + BIP_IP_CHECKSUM = 1 << 4, +}; + + + + +struct bio_integrity_payload { + struct bio *bip_bio; + + struct bvec_iter bip_iter; + + unsigned short bip_slab; + unsigned short bip_vcnt; + unsigned short bip_max_vcnt; + unsigned short bip_flags; + + struct bvec_iter bio_iter; + + struct work_struct bip_work; + + struct bio_vec *bip_vec; + struct bio_vec bip_inline_vecs[]; +}; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio_integrity_payload *bio_integrity(struct bio *bio) +{ + if (bio->bi_opf & (1ULL << __REQ_INTEGRITY)) + return bio->bi_integrity; + + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + + if (bip) + return bip->bip_flags & flag; + + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) sector_t bip_get_seed(struct bio_integrity_payload *bip) +{ + return bip->bip_iter.bi_sector; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bip_set_seed(struct bio_integrity_payload *bip, + sector_t seed) +{ + bip->bip_iter.bi_sector = seed; +} + + + +extern void bio_trim(struct bio *bio, int offset, int size); +extern struct bio *bio_split(struct bio *bio, int sectors, + gfp_t gfp, struct bio_set *bs); +# 380 "./include/linux/bio.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio *bio_next_split(struct bio *bio, int sectors, + gfp_t gfp, struct bio_set *bs) +{ + if (sectors >= (((bio)->bi_iter).bi_size >> 9)) + return bio; + + return bio_split(bio, sectors, gfp, bs); +} + +enum { + BIOSET_NEED_BVECS = ((((1UL))) << (0)), + BIOSET_NEED_RESCUER = ((((1UL))) << (1)), +}; +extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); +extern void bioset_exit(struct bio_set *); +extern int biovec_init_pool(mempool_t *pool, int pool_entries); +extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); + +extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *); +extern void bio_put(struct bio *); + +extern void __bio_clone_fast(struct bio *, struct bio *); +extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *); + +extern struct bio_set fs_bio_set; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) +{ + return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) +{ + return bio_alloc_bioset(gfp_mask, nr_iovecs, ((void *)0)); +} + +extern blk_qc_t submit_bio(struct bio *); + +extern void bio_endio(struct bio *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_io_error(struct bio *bio) +{ + bio->bi_status = (( blk_status_t)10); + bio_endio(bio); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void bio_wouldblock_error(struct bio *bio) +{ + bio_set_flag(bio, BIO_QUIET); + bio->bi_status = (( blk_status_t)12); + bio_endio(bio); +} + +struct request_queue; + +extern int submit_bio_wait(struct bio *bio); +extern void bio_advance(struct bio *, unsigned); + +extern void bio_init(struct bio *bio, struct bio_vec *table, + unsigned short max_vecs); +extern void bio_uninit(struct bio *); +extern void bio_reset(struct bio *); +void bio_chain(struct bio *, struct bio *); + +extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); +extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, + unsigned int, unsigned int); +bool __bio_try_merge_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int off, bool *same_page); +void __bio_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int off); +int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); +void bio_release_pages(struct bio *bio, bool mark_dirty); +extern void bio_set_pages_dirty(struct bio *bio); +extern void bio_check_pages_dirty(struct bio *bio); + +extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, + struct bio *src, struct bvec_iter *src_iter); +extern void bio_copy_data(struct bio *dst, struct bio *src); +extern void bio_list_copy_data(struct bio *dst, struct bio *src); +extern void bio_free_pages(struct bio *bio); +void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); +void bio_truncate(struct bio *bio, unsigned new_size); +void guard_bio_eod(struct bio *bio); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void zero_fill_bio(struct bio *bio) +{ + zero_fill_bio_iter(bio, bio->bi_iter); +} + +extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); +extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); +extern unsigned int bvec_nr_vecs(unsigned short idx); +extern const char *bio_devname(struct bio *bio, char *buffer); +# 495 "./include/linux/bio.h" +void bio_associate_blkg_from_page(struct bio *bio, struct page *page); + + + + + + +void bio_disassociate_blkg(struct bio *bio); +void bio_associate_blkg(struct bio *bio); +void bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css); +void bio_clone_blkg_association(struct bio *dst, struct bio *src); +# 547 "./include/linux/bio.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +{ + return lowmem_page_address(bvec->bv_page) + bvec->bv_offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bvec_kunmap_irq(char *buffer, unsigned long *flags) +{ + *flags = 0; +} +# 565 "./include/linux/bio.h" +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bio_list_empty(const struct bio_list *bl) +{ + return bl->head == ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_list_init(struct bio_list *bl) +{ + bl->head = bl->tail = ((void *)0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned 
bio_list_size(const struct bio_list *bl) +{ + unsigned sz = 0; + struct bio *bio; + + for (bio = (bl)->head; bio; bio = bio->bi_next) + sz++; + + return sz; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_list_add(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = ((void *)0); + + if (bl->tail) + bl->tail->bi_next = bio; + else + bl->head = bio; + + bl->tail = bio; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_list_add_head(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = bl->head; + + bl->head = bio; + + if (!bl->tail) + bl->tail = bio; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->tail) + bl->tail->bi_next = bl2->head; + else + bl->head = bl2->head; + + bl->tail = bl2->tail; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_list_merge_head(struct bio_list *bl, + struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->head) + bl2->tail->bi_next = bl->head; + else + bl->tail = bl2->tail; + + bl->head = bl2->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio *bio_list_peek(struct bio_list *bl) +{ + return bl->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio *bio_list_pop(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + if (bio) { + bl->head = bl->head->bi_next; + if (!bl->head) + bl->tail = ((void *)0); + + bio->bi_next = ((void *)0); + } + + return bio; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio *bio_list_get(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + bl->head = bl->tail = ((void *)0); + + return bio; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_inc_remaining(struct bio *bio) +{ + bio_set_flag(bio, BIO_CHAIN); + do { } while (0); + atomic_inc(&bio->__bi_remaining); +} +# 693 "./include/linux/bio.h" +struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + + mempool_t bio_pool; + mempool_t bvec_pool; + + mempool_t bio_integrity_pool; + mempool_t bvec_integrity_pool; + + + + + + + spinlock_t rescue_lock; + struct bio_list rescue_list; + struct work_struct rescue_work; + struct workqueue_struct *rescue_workqueue; +}; + +struct biovec_slab { + int nr_vecs; + char *name; + struct kmem_cache *slab; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bioset_initialized(struct bio_set *bs) +{ + return bs->bio_slab != ((void *)0); +} +# 740 "./include/linux/bio.h" +extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); +extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); +extern bool bio_integrity_prep(struct bio *); +extern void bio_integrity_advance(struct bio *, unsigned int); +extern void bio_integrity_trim(struct bio *); +extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t); +extern int 
bioset_integrity_create(struct bio_set *, int); +extern void bioset_integrity_free(struct bio_set *); +extern void bio_integrity_init(void); +# 820 "./include/linux/bio.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_set_polled(struct bio *bio, struct kiocb *kiocb) +{ + bio->bi_opf |= (1ULL << __REQ_HIPRI); + if (!is_sync_kiocb(kiocb)) + bio->bi_opf |= (1ULL << __REQ_NOWAIT); +} +# 22 "./include/linux/blkdev.h" 2 + + +# 1 "./include/linux/bsg.h" 1 + + + + +# 1 "./include/uapi/linux/bsg.h" 1 +# 22 "./include/uapi/linux/bsg.h" +struct sg_io_v4 { + __s32 guard; + __u32 protocol; + __u32 subprotocol; + + + __u32 request_len; + __u64 request; + __u64 request_tag; + __u32 request_attr; + __u32 request_priority; + __u32 request_extra; + __u32 max_response_len; + __u64 response; + + + __u32 dout_iovec_count; + + __u32 dout_xfer_len; + __u32 din_iovec_count; + __u32 din_xfer_len; + __u64 dout_xferp; + __u64 din_xferp; + + __u32 timeout; + __u32 flags; + __u64 usr_ptr; + __u32 spare_in; + + __u32 driver_status; + __u32 transport_status; + __u32 device_status; + __u32 retry_delay; + __u32 info; + __u32 duration; + __u32 response_len; + __s32 din_resid; + __s32 dout_resid; + __u64 generated_tag; + __u32 spare_out; + + __u32 padding; +}; +# 6 "./include/linux/bsg.h" 2 + +struct request; + + +struct bsg_ops { + int (*check_proto)(struct sg_io_v4 *hdr); + int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, + fmode_t mode); + int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); + void (*free_rq)(struct request *rq); +}; + +struct bsg_class_device { + struct device *class_dev; + int minor; + struct request_queue *queue; + const struct bsg_ops *ops; +}; + +int bsg_register_queue(struct request_queue *q, struct device *parent, + const char *name, const struct bsg_ops *ops); +int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); +void bsg_unregister_queue(struct request_queue *q); +# 25 "./include/linux/blkdev.h" 2 + + + +# 1 "./include/linux/scatterlist.h" 1 +# 9 "./include/linux/scatterlist.h" +# 1 "./arch/x86/include/asm/io.h" 1 +# 44 "./arch/x86/include/asm/io.h" +# 1 "./arch/x86/include/generated/asm/early_ioremap.h" 1 +# 1 "./include/asm-generic/early_ioremap.h" 1 +# 11 "./include/asm-generic/early_ioremap.h" +extern void *early_ioremap(resource_size_t phys_addr, + unsigned long size); +extern void *early_memremap(resource_size_t phys_addr, + unsigned long size); +extern void *early_memremap_ro(resource_size_t phys_addr, + unsigned long size); +extern void *early_memremap_prot(resource_size_t phys_addr, + unsigned long size, unsigned long prot_val); +extern void early_iounmap(void *addr, unsigned long size); +extern void early_memunmap(void *addr, unsigned long size); + + + + + +extern void early_ioremap_shutdown(void); + + + +extern void early_ioremap_init(void); + + +extern void early_ioremap_setup(void); + + + + + +extern void early_ioremap_reset(void); + + + + +extern void copy_from_early_mem(void *dest, phys_addr_t src, + unsigned long size); +# 1 "./arch/x86/include/generated/asm/early_ioremap.h" 2 +# 45 "./arch/x86/include/asm/io.h" 2 +# 57 "./arch/x86/include/asm/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) :"memory"); return ret; } +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) :"memory"); return ret; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) :"memory"); return ret; } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char __readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) ); return ret; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short __readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) ); return ret; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) ); return ret; } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) :"memory"); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) :"memory"); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) :"memory"); } + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) ); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) ); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) ); } +# 95 "./arch/x86/include/asm/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 readq(const volatile void *addr) { u64 ret; asm volatile("mov" "q" " %1,%0":"=r" (ret) :"m" (*(volatile u64 *)addr) :"memory"); return ret; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 __readq(const volatile void *addr) { u64 ret; asm volatile("mov" "q" " %1,%0":"=r" (ret) :"m" (*(volatile u64 *)addr) ); return ret; } 
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writeq(u64 val, volatile void *addr) { asm volatile("mov" "q" " %0,%1": :"r" (val), "m" (*(volatile u64 *)addr) :"memory"); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __writeq(u64 val, volatile void *addr) { asm volatile("mov" "q" " %0,%1": :"r" (val), "m" (*(volatile u64 *)addr) ); } +# 113 "./arch/x86/include/asm/io.h" +extern int valid_phys_addr_range(phys_addr_t addr, size_t size); +extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); +# 129 "./arch/x86/include/asm/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) phys_addr_t virt_to_phys(volatile void *address) +{ + return __phys_addr((unsigned long)(address)); +} +# 148 "./arch/x86/include/asm/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *phys_to_virt(phys_addr_t address) +{ + return ((void *)((unsigned long)(address)+((unsigned long)page_offset_base))); +} +# 164 "./arch/x86/include/asm/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int isa_virt_to_bus(volatile void *address) +{ + return (unsigned int)virt_to_phys(address); +} +# 183 "./arch/x86/include/asm/io.h" +extern void *ioremap_uc(resource_size_t offset, unsigned long size); + +extern void *ioremap_cache(resource_size_t offset, unsigned long size); + +extern void *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val); + +extern void *ioremap_encrypted(resource_size_t phys_addr, unsigned long size); +# 206 "./arch/x86/include/asm/io.h" +void *ioremap(resource_size_t offset, unsigned long size); + + +extern void iounmap(volatile void *addr); + + +extern void set_iounmap_nonlazy(void); + + + +void memcpy_fromio(void *, const volatile void *, size_t); +void memcpy_toio(volatile void *, const void *, size_t); +void memset_io(volatile void *, int, size_t); + + + + + +# 1 "./include/asm-generic/iomap.h" 1 +# 29 "./include/asm-generic/iomap.h" +extern unsigned int ioread8(void *); +extern unsigned int ioread16(void *); +extern unsigned int ioread16be(void *); +extern unsigned int ioread32(void *); +extern unsigned int ioread32be(void *); + +extern u64 ioread64(void *); +extern u64 ioread64be(void *); + + + + + + + +extern u64 ioread64_lo_hi(void *addr); +extern u64 ioread64_hi_lo(void *addr); +extern u64 ioread64be_lo_hi(void *addr); +extern u64 ioread64be_hi_lo(void *addr); + + +extern void iowrite8(u8, void *); +extern void iowrite16(u16, void *); +extern void iowrite16be(u16, void *); +extern void iowrite32(u32, void *); +extern void iowrite32be(u32, void *); + +extern void iowrite64(u64, void *); +extern void iowrite64be(u64, void *); + + + + + + + +extern void iowrite64_lo_hi(u64 val, void *addr); +extern void iowrite64_hi_lo(u64 val, void *addr); +extern void iowrite64be_lo_hi(u64 val, void *addr); +extern void iowrite64be_hi_lo(u64 val, void *addr); +# 82 "./include/asm-generic/iomap.h" +extern void ioread8_rep(void *port, void *buf, unsigned long count); +extern void ioread16_rep(void *port, void *buf, unsigned long count); +extern void ioread32_rep(void *port, void *buf, unsigned long count); + +extern void iowrite8_rep(void *port, const void *buf, unsigned long count); +extern void iowrite16_rep(void 
*port, const void *buf, unsigned long count); +extern void iowrite32_rep(void *port, const void *buf, unsigned long count); + + + +extern void *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void *); +# 106 "./include/asm-generic/iomap.h" +struct pci_dev; +extern void pci_iounmap(struct pci_dev *dev, void *); + + + + + + +# 1 "./include/asm-generic/pci_iomap.h" 1 +# 10 "./include/asm-generic/pci_iomap.h" +struct pci_dev; + + +extern void *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); +extern void *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max); +extern void *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); +extern void *pci_iomap_wc_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); +# 115 "./include/asm-generic/iomap.h" 2 +# 225 "./arch/x86/include/asm/io.h" 2 +# 238 "./arch/x86/include/asm/io.h" +extern void native_io_delay(void); + +extern int io_delay_type; +extern void io_delay_init(void); +# 262 "./arch/x86/include/asm/io.h" +extern struct static_key_false sev_enable_key; +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sev_key_active(void) +{ + return ({ bool branch; if (__builtin_types_compatible_p(typeof(*&sev_enable_key), struct static_key_true)) branch = arch_static_branch_jump(&(&sev_enable_key)->key, false); else if (__builtin_types_compatible_p(typeof(*&sev_enable_key), struct static_key_false)) branch = arch_static_branch(&(&sev_enable_key)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }); +} +# 334 "./arch/x86/include/asm/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outb(unsigned char value, int port) { asm volatile("out" "b" " %" "b" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char inb(int port) { unsigned char value; asm volatile("in" "b" " %w1, %" "b" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outb_p(unsigned char value, int port) { outb(value, port); slow_down_io(); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char inb_p(int port) { unsigned char value = inb(port); slow_down_io(); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsb(int port, const void *addr, unsigned long count) { if (sev_key_active()) { unsigned char *value = (unsigned char *)addr; while (count) { outb(*value, port); value++; count--; } } else { asm volatile("rep; outs" "b" : "+S"(addr), "+c"(count) : "d"(port) : "memory"); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insb(int port, void *addr, unsigned long count) { if (sev_key_active()) { unsigned char *value = (unsigned char *)addr; while (count) { *value = inb(port); value++; count--; } } else { asm volatile("rep; ins" "b" : "+D"(addr), "+c"(count) : "d"(port) : "memory"); } } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outw(unsigned short value, int port) { asm 
volatile("out" "w" " %" "w" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short inw(int port) { unsigned short value; asm volatile("in" "w" " %w1, %" "w" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outw_p(unsigned short value, int port) { outw(value, port); slow_down_io(); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short inw_p(int port) { unsigned short value = inw(port); slow_down_io(); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsw(int port, const void *addr, unsigned long count) { if (sev_key_active()) { unsigned short *value = (unsigned short *)addr; while (count) { outw(*value, port); value++; count--; } } else { asm volatile("rep; outs" "w" : "+S"(addr), "+c"(count) : "d"(port) : "memory"); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insw(int port, void *addr, unsigned long count) { if (sev_key_active()) { unsigned short *value = (unsigned short *)addr; while (count) { *value = inw(port); value++; count--; } } else { asm volatile("rep; ins" "w" : "+D"(addr), "+c"(count) : "d"(port) : "memory"); } } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outl(unsigned int value, int port) { asm volatile("out" "l" " %" "" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int inl(int port) { unsigned int value; asm volatile("in" "l" " %w1, %" "" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outl_p(unsigned int value, int port) { outl(value, port); slow_down_io(); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int inl_p(int port) { unsigned int value = inl(port); slow_down_io(); return value; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsl(int port, const void *addr, unsigned long count) { if (sev_key_active()) { unsigned int *value = (unsigned int *)addr; while (count) { outl(*value, port); value++; count--; } } else { asm volatile("rep; outs" "l" : "+S"(addr), "+c"(count) : "d"(port) : "memory"); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insl(int port, void *addr, unsigned long count) { if (sev_key_active()) { unsigned int *value = (unsigned int *)addr; while (count) { *value = inl(port); value++; count--; } } else { asm volatile("rep; ins" "l" : "+D"(addr), "+c"(count) : "d"(port) : "memory"); } } +# 358 "./arch/x86/include/asm/io.h" +extern void *xlate_dev_mem_ptr(phys_addr_t phys); +extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); + + + + +extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, + enum page_cache_mode pcm); +extern void *ioremap_wc(resource_size_t offset, unsigned long size); + +extern void *ioremap_wt(resource_size_t 
offset, unsigned long size); + + +extern bool is_early_ioremap_ptep(pte_t *ptep); + + + +# 1 "./include/asm-generic/io.h" 1 +# 18 "./include/asm-generic/io.h" +# 1 "./arch/x86/include/generated/asm/mmiowb.h" 1 +# 19 "./include/asm-generic/io.h" 2 +# 317 "./include/asm-generic/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void readsb(const volatile void *addr, void *buffer, + unsigned int count) +{ + if (count) { + u8 *buf = buffer; + + do { + u8 x = __readb(addr); + *buf++ = x; + } while (--count); + } +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void readsw(const volatile void *addr, void *buffer, + unsigned int count) +{ + if (count) { + u16 *buf = buffer; + + do { + u16 x = __readw(addr); + *buf++ = x; + } while (--count); + } +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void readsl(const volatile void *addr, void *buffer, + unsigned int count) +{ + if (count) { + u32 *buf = buffer; + + do { + u32 x = __readl(addr); + *buf++ = x; + } while (--count); + } +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void readsq(const volatile void *addr, void *buffer, + unsigned int count) +{ + if (count) { + u64 *buf = buffer; + + do { + u64 x = __readq(addr); + *buf++ = x; + } while (--count); + } +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writesb(volatile void *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u8 *buf = buffer; + + do { + __writeb(*buf++, addr); + } while (--count); + } +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writesw(volatile void *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u16 *buf = buffer; + + do { + __writew(*buf++, addr); + } while (--count); + } +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writesl(volatile void *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u32 *buf = buffer; + + do { + __writel(*buf++, addr); + } while (--count); + } +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void writesq(volatile void *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u64 *buf = buffer; + + do { + __writeq(*buf++, addr); + } while (--count); + } +} +# 526 "./include/asm-generic/io.h" +# 1 "./include/linux/logic_pio.h" 1 +# 11 "./include/linux/logic_pio.h" +# 1 "./include/linux/fwnode.h" 1 +# 14 "./include/linux/fwnode.h" +struct fwnode_operations; +struct device; + +struct fwnode_handle { + struct fwnode_handle *secondary; + const struct fwnode_operations *ops; + struct device *dev; +}; + + + + + + + +struct fwnode_endpoint { + unsigned int port; + unsigned int id; + const struct fwnode_handle *local_fwnode; +}; +# 43 "./include/linux/fwnode.h" +struct fwnode_reference_args { + struct fwnode_handle *fwnode; + unsigned int nargs; + u64 args[8]; +}; +# 110 "./include/linux/fwnode.h" +struct fwnode_operations { + struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); + void (*put)(struct fwnode_handle *fwnode); + bool 
(*device_is_available)(const struct fwnode_handle *fwnode); + const void *(*device_get_match_data)(const struct fwnode_handle *fwnode, + const struct device *dev); + bool (*property_present)(const struct fwnode_handle *fwnode, + const char *propname); + int (*property_read_int_array)(const struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval); + int + (*property_read_string_array)(const struct fwnode_handle *fwnode_handle, + const char *propname, const char **val, + size_t nval); + const char *(*get_name)(const struct fwnode_handle *fwnode); + const char *(*get_name_prefix)(const struct fwnode_handle *fwnode); + struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode); + struct fwnode_handle * + (*get_next_child_node)(const struct fwnode_handle *fwnode, + struct fwnode_handle *child); + struct fwnode_handle * + (*get_named_child_node)(const struct fwnode_handle *fwnode, + const char *name); + int (*get_reference_args)(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args); + struct fwnode_handle * + (*graph_get_next_endpoint)(const struct fwnode_handle *fwnode, + struct fwnode_handle *prev); + struct fwnode_handle * + (*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode); + struct fwnode_handle * + (*graph_get_port_parent)(struct fwnode_handle *fwnode); + int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode, + struct fwnode_endpoint *endpoint); + int (*add_links)(const struct fwnode_handle *fwnode, + struct device *dev); +}; +# 173 "./include/linux/fwnode.h" +extern u32 fw_devlink_get_flags(void); +void fw_devlink_pause(void); +void fw_devlink_resume(void); +# 12 "./include/linux/logic_pio.h" 2 + +enum { + LOGIC_PIO_INDIRECT, + LOGIC_PIO_CPU_MMIO, +}; + +struct logic_pio_hwaddr { + struct list_head list; + struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; + unsigned long flags; + + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth); + void (*out)(void *hostdata, unsigned long addr, u32 val, + size_t dwidth); + u32 (*ins)(void *hostdata, unsigned long addr, void *buffer, + size_t dwidth, unsigned int count); + void (*outs)(void *hostdata, unsigned long addr, const void *buffer, + size_t dwidth, unsigned int count); +}; +# 116 "./include/linux/logic_pio.h" +struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode); +unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, + resource_size_t hw_addr, resource_size_t size); +int logic_pio_register_range(struct logic_pio_hwaddr *newrange); +void logic_pio_unregister_range(struct logic_pio_hwaddr *range); +resource_size_t logic_pio_to_hwaddr(unsigned long pio); +unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); +# 527 "./include/asm-generic/io.h" 2 +# 658 "./include/asm-generic/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insb_p(unsigned long addr, void *buffer, unsigned int count) +{ + insb(addr, buffer, count); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insw_p(unsigned long addr, void *buffer, unsigned int count) +{ + insw(addr, buffer, count); +} + + + + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void insl_p(unsigned long addr, void *buffer, unsigned int count) +{ + insl(addr, buffer, count); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsb_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsb(addr, buffer, count); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsw_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsw(addr, buffer, count); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void outsl_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsl(addr, buffer, count); +} +# 911 "./include/asm-generic/io.h" +# 1 "./include/linux/vmalloc.h" 1 +# 13 "./include/linux/vmalloc.h" +# 1 "./arch/x86/include/asm/vmalloc.h" 1 +# 14 "./include/linux/vmalloc.h" 2 + +struct vm_area_struct; +struct notifier_block; +# 55 "./include/linux/vmalloc.h" +struct vm_struct { + struct vm_struct *next; + void *addr; + unsigned long size; + unsigned long flags; + struct page **pages; + unsigned int nr_pages; + phys_addr_t phys_addr; + const void *caller; +}; + +struct vmap_area { + unsigned long va_start; + unsigned long va_end; + + struct rb_node rb_node; + struct list_head list; +# 80 "./include/linux/vmalloc.h" + union { + unsigned long subtree_max_size; + struct vm_struct *vm; + struct llist_node purge_list; + }; +}; + + + + +extern void vm_unmap_ram(const void *mem, unsigned int count); +extern void *vm_map_ram(struct page **pages, unsigned int count, int node); +extern void vm_unmap_aliases(void); + + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) vmalloc_init(void); +extern unsigned long vmalloc_nr_pages(void); + + + + + + + +extern void *vmalloc(unsigned long size); +extern void *vzalloc(unsigned long size); +extern void *vmalloc_user(unsigned long size); +extern void *vmalloc_node(unsigned long size, int node); +extern void *vzalloc_node(unsigned long size, int node); +extern void *vmalloc_exec(unsigned long size); +extern void *vmalloc_32(unsigned long size); +extern void *vmalloc_32_user(unsigned long size); +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask); +extern void *__vmalloc_node_range(unsigned long size, unsigned long align, + unsigned long start, unsigned long end, gfp_t gfp_mask, + pgprot_t prot, unsigned long vm_flags, int node, + const void *caller); +void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, + int node, const void *caller); + +extern void vfree(const void *addr); +extern void vfree_atomic(const void *addr); + +extern void *vmap(struct page **pages, unsigned int count, + unsigned long flags, pgprot_t prot); +extern void vunmap(const void *addr); + +extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, + unsigned long uaddr, void *kaddr, + unsigned long pgoff, unsigned long size); + +extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, + unsigned long pgoff); +# 148 "./include/linux/vmalloc.h" +void arch_sync_kernel_mappings(unsigned long start, unsigned long end); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) size_t get_vm_area_size(const 
struct vm_struct *area) +{ + if (!(area->flags & 0x00000040)) + + return area->size - ((1UL) << 12); + else + return area->size; + +} + +extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); +extern struct vm_struct *get_vm_area_caller(unsigned long size, + unsigned long flags, const void *caller); +extern struct vm_struct *__get_vm_area_caller(unsigned long size, + unsigned long flags, + unsigned long start, unsigned long end, + const void *caller); +extern struct vm_struct *remove_vm_area(const void *addr); +extern struct vm_struct *find_vm_area(const void *addr); + + +extern int map_kernel_range_noflush(unsigned long start, unsigned long size, + pgprot_t prot, struct page **pages); +int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, + struct page **pages); +extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); +extern void unmap_kernel_range(unsigned long addr, unsigned long size); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_vm_flush_reset_perms(void *addr) +{ + struct vm_struct *vm = find_vm_area(addr); + + if (vm) + vm->flags |= 0x00000100; +} +# 207 "./include/linux/vmalloc.h" +extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); +extern void free_vm_area(struct vm_struct *area); + + +extern long vread(char *buf, char *addr, unsigned long count); +extern long vwrite(char *buf, char *addr, unsigned long count); + + + + +extern struct list_head vmap_area_list; +extern __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) void vm_area_add_early(struct vm_struct *vm); +extern __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) void vm_area_register_early(struct vm_struct *vm, size_t align); + + + +struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, + const size_t *sizes, int nr_vms, + size_t align); + +void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); +# 250 "./include/linux/vmalloc.h" +int register_vmap_purge_notifier(struct notifier_block *nb); +int unregister_vmap_purge_notifier(struct notifier_block *nb); +# 912 "./include/asm-generic/io.h" 2 +# 1028 "./include/asm-generic/io.h" +extern void *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void *p); +# 1038 "./include/asm-generic/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *xlate_dev_kmem_ptr(void *addr) +{ + return addr; +} +# 376 "./arch/x86/include/asm/io.h" 2 + + + +extern int __attribute__((__warn_unused_result__)) arch_phys_wc_index(int handle); + + +extern int __attribute__((__warn_unused_result__)) arch_phys_wc_add(unsigned long base, + unsigned long size); +extern void arch_phys_wc_del(int handle); + + + + +extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size); +extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size); + + + +extern bool arch_memremap_can_ram_remap(resource_size_t offset, + unsigned long size, + unsigned long flags); + + +extern bool phys_mem_access_encrypted(unsigned long phys_addr, + unsigned long size); +# 415 "./arch/x86/include/asm/io.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void iosubmit_cmds512(void *__dst, const void *src, + size_t count) +{ + + + + + + 
+ + volatile struct { char _[64]; } *dst = __dst; + const u8 *from = src; + const u8 *end = from + count * 64; + + while (from < end) { + + asm volatile(".byte 0x66, 0x0f, 0x38, 0xf8, 0x02" + : "=m" (dst) + : "d" (from), "a" (dst)); + from += 64; + } +} +# 10 "./include/linux/scatterlist.h" 2 + +struct scatterlist { + unsigned long page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; + + unsigned int dma_length; + +}; +# 42 "./include/linux/scatterlist.h" +struct sg_table { + struct scatterlist *sgl; + unsigned int nents; + unsigned int orig_nents; +}; +# 87 "./include/linux/scatterlist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sg_assign_page(struct scatterlist *sg, struct page *page) +{ + unsigned long page_link = sg->page_link & (0x01UL | 0x02UL); + + + + + + do { if (__builtin_expect(!!((unsigned long) page & (0x01UL | 0x02UL)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1096)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/scatterlist.h"), "i" (95), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1097)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + do { if (__builtin_expect(!!(((sg)->page_link & 0x01UL)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1098)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/scatterlist.h"), "i" (97), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1099)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + sg->page_link = page_link | (unsigned long) page; +} +# 116 "./include/linux/scatterlist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sg_set_page(struct scatterlist *sg, struct page *page, + unsigned int len, unsigned int offset) +{ + sg_assign_page(sg, page); + sg->offset = offset; + sg->length = len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *sg_page(struct scatterlist *sg) +{ + + do { if (__builtin_expect(!!(((sg)->page_link & 0x01UL)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1100)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# 
bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/scatterlist.h"), "i" (127), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1101)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + return (struct page *)((sg)->page_link & ~(0x01UL | 0x02UL)); +} +# 139 "./include/linux/scatterlist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sg_set_buf(struct scatterlist *sg, const void *buf, + unsigned int buflen) +{ + + do { if (__builtin_expect(!!(!__virt_addr_valid((unsigned long) (buf))), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1102)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/scatterlist.h"), "i" (143), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1103)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + sg_set_page(sg, (((struct page *)vmemmap_base) + (__phys_addr((unsigned long)(buf)) >> 12)), buflen, ((unsigned long)(buf) & ~(~(((1UL) << 12)-1)))); +} +# 178 "./include/linux/scatterlist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sg_chain(struct scatterlist *prv, unsigned int prv_nents, + struct scatterlist *sgl) +{ + + + + prv[prv_nents - 1].offset = 0; + prv[prv_nents - 1].length = 0; + + + + + + prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01UL) + & ~0x02UL; +} +# 204 "./include/linux/scatterlist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sg_mark_end(struct scatterlist *sg) +{ + + + + sg->page_link |= 0x02UL; + sg->page_link &= ~0x01UL; +} +# 221 "./include/linux/scatterlist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sg_unmark_end(struct scatterlist *sg) +{ + sg->page_link &= ~0x02UL; +} +# 236 "./include/linux/scatterlist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dma_addr_t sg_phys(struct scatterlist *sg) +{ + return ((dma_addr_t)(unsigned long)((sg_page(sg)) - ((struct page *)vmemmap_base)) << 12) + sg->offset; +} +# 251 "./include/linux/scatterlist.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *sg_virt(struct scatterlist *sg) +{ + return lowmem_page_address(sg_page(sg)) + sg->offset; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sg_init_marker(struct scatterlist *sgl, + unsigned int nents) +{ + sg_mark_end(&sgl[nents - 1]); +} + +int sg_nents(struct scatterlist *sg); +int sg_nents_for_len(struct scatterlist *sg, u64 len); +struct scatterlist *sg_next(struct scatterlist *); +struct scatterlist 
*sg_last(struct scatterlist *s, unsigned int); +void sg_init_table(struct scatterlist *, unsigned int); +void sg_init_one(struct scatterlist *, const void *, unsigned int); +int sg_split(struct scatterlist *in, const int in_mapped_nents, + const off_t skip, const int nb_splits, + const size_t *split_sizes, + struct scatterlist **out, int *out_mapped_nents, + gfp_t gfp_mask); + +typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); +typedef void (sg_free_fn)(struct scatterlist *, unsigned int); + +void __sg_free_table(struct sg_table *, unsigned int, unsigned int, + sg_free_fn *); +void sg_free_table(struct sg_table *); +int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, + struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); +int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); +int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, unsigned int max_segment, + gfp_t gfp_mask); +int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, gfp_t gfp_mask); + + +struct scatterlist *sgl_alloc_order(unsigned long long length, + unsigned int order, bool chainable, + gfp_t gfp, unsigned int *nent_p); +struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp, + unsigned int *nent_p); +void sgl_free_n_order(struct scatterlist *sgl, int nents, int order); +void sgl_free_order(struct scatterlist *sgl, int order); +void sgl_free(struct scatterlist *sgl); + + +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer); + +size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, + const void *buf, size_t buflen); +size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen); + +size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, + const void *buf, size_t buflen, off_t skip); +size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, off_t skip); +size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, + size_t buflen, off_t skip); +# 349 "./include/linux/scatterlist.h" +void sg_free_table_chained(struct sg_table *table, + unsigned nents_first_chunk); +int sg_alloc_table_chained(struct sg_table *table, int nents, + struct scatterlist *first_chunk, + unsigned nents_first_chunk); +# 366 "./include/linux/scatterlist.h" +struct sg_page_iter { + struct scatterlist *sg; + unsigned int sg_pgoffset; + + + unsigned int __nents; + int __pg_advance; + +}; +# 383 "./include/linux/scatterlist.h" +struct sg_dma_page_iter { + struct sg_page_iter base; +}; + +bool __sg_page_iter_next(struct sg_page_iter *piter); +bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter); +void __sg_page_iter_start(struct sg_page_iter *piter, + struct scatterlist *sglist, unsigned int nents, + unsigned long pgoffset); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *sg_page_iter_page(struct sg_page_iter *piter) +{ + return (((struct page *)vmemmap_base) + ((unsigned long)(((sg_page(piter->sg))) - ((struct page *)vmemmap_base)) + (piter->sg_pgoffset))); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dma_addr_t +sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) 
+{ + return ((dma_iter->base.sg)->dma_address) + + (dma_iter->base.sg_pgoffset << 12); +} +# 491 "./include/linux/scatterlist.h" +struct sg_mapping_iter { + + struct page *page; + void *addr; + size_t length; + size_t consumed; + struct sg_page_iter piter; + + + unsigned int __offset; + unsigned int __remaining; + unsigned int __flags; +}; + +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags); +bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset); +bool sg_miter_next(struct sg_mapping_iter *miter); +void sg_miter_stop(struct sg_mapping_iter *miter); +# 29 "./include/linux/blkdev.h" 2 +# 1 "./include/uapi/linux/blkzoned.h" 1 +# 32 "./include/uapi/linux/blkzoned.h" +enum blk_zone_type { + BLK_ZONE_TYPE_CONVENTIONAL = 0x1, + BLK_ZONE_TYPE_SEQWRITE_REQ = 0x2, + BLK_ZONE_TYPE_SEQWRITE_PREF = 0x3, +}; +# 65 "./include/uapi/linux/blkzoned.h" +enum blk_zone_cond { + BLK_ZONE_COND_NOT_WP = 0x0, + BLK_ZONE_COND_EMPTY = 0x1, + BLK_ZONE_COND_IMP_OPEN = 0x2, + BLK_ZONE_COND_EXP_OPEN = 0x3, + BLK_ZONE_COND_CLOSED = 0x4, + BLK_ZONE_COND_READONLY = 0xD, + BLK_ZONE_COND_FULL = 0xE, + BLK_ZONE_COND_OFFLINE = 0xF, +}; +# 94 "./include/uapi/linux/blkzoned.h" +struct blk_zone { + __u64 start; + __u64 len; + __u64 wp; + __u8 type; + __u8 cond; + __u8 non_seq; + __u8 reset; + __u8 reserved[36]; +}; +# 115 "./include/uapi/linux/blkzoned.h" +struct blk_zone_report { + __u64 sector; + __u32 nr_zones; + __u8 reserved[4]; + struct blk_zone zones[0]; +}; +# 129 "./include/uapi/linux/blkzoned.h" +struct blk_zone_range { + __u64 sector; + __u64 nr_sectors; +}; +# 30 "./include/linux/blkdev.h" 2 + +struct module; +struct scsi_ioctl_command; + +struct request_queue; +struct elevator_queue; +struct blk_trace; +struct request; +struct sg_io_hdr; +struct bsg_job; +struct blkcg_gq; +struct blk_flush_queue; +struct pr_ops; +struct rq_qos; +struct blk_queue_stats; +struct blk_stat_callback; +struct blk_keyslot_manager; +# 63 "./include/linux/blkdev.h" +typedef void (rq_end_io_fn)(struct request *, blk_status_t); + + + +typedef __u32 req_flags_t; +# 119 "./include/linux/blkdev.h" +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + + + + + + + +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + + unsigned int cmd_flags; + req_flags_t rq_flags; + + int tag; + int internal_tag; + + + unsigned int __data_len; + sector_t __sector; + + struct bio *bio; + struct bio *biotail; + + struct list_head queuelist; +# 158 "./include/linux/blkdev.h" + union { + struct hlist_node hash; + struct list_head ipi_list; + }; + + + + + + + union { + struct rb_node rb_node; + struct bio_vec special_vec; + void *completion_data; + int error_count; + }; + + + + + + + + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + + struct gendisk *rq_disk; + struct hd_struct *part; + + + u64 alloc_time_ns; + + + u64 start_time_ns; + + u64 io_start_time_ns; + + + unsigned short wbt_flags; + + + + + + + unsigned short stats_sectors; + + + + + + unsigned short nr_phys_segments; + + + unsigned short nr_integrity_segments; + + + + struct bio_crypt_ctx *crypt_ctx; + struct blk_ksm_keyslot *crypt_keyslot; + + + unsigned short write_hint; + unsigned short ioprio; + + enum mq_rq_state state; + refcount_t ref; + + unsigned int timeout; + unsigned long deadline; + + union { + struct 
__call_single_data csd; + u64 fifo_time; + }; + + + + + rq_end_io_fn *end_io; + void *end_io_data; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_op_is_scsi(unsigned int op) +{ + return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_op_is_private(unsigned int op) +{ + return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_rq_is_scsi(struct request *rq) +{ + return blk_op_is_scsi(((rq)->cmd_flags & ((1 << 8) - 1))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_rq_is_private(struct request *rq) +{ + return blk_op_is_private(((rq)->cmd_flags & ((1 << 8) - 1))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_rq_is_passthrough(struct request *rq) +{ + return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_is_passthrough(struct bio *bio) +{ + unsigned op = ((bio)->bi_opf & ((1 << 8) - 1)); + + return blk_op_is_scsi(op) || blk_op_is_private(op); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short req_get_ioprio(struct request *req) +{ + return req->ioprio; +} + +# 1 "./include/linux/elevator.h" 1 + + + + + +# 1 "./include/linux/hashtable.h" 1 +# 34 "./include/linux/hashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + ((&ht[i])->first = ((void *)0)); +} +# 76 "./include/linux/hashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} +# 105 "./include/linux/hashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hash_del_rcu(struct hlist_node *node) +{ + hlist_del_init_rcu(node); +} +# 7 "./include/linux/elevator.h" 2 + + + +struct io_cq; +struct elevator_type; + +struct blk_mq_debugfs_attr; + + + + + +enum elv_merge { + ELEVATOR_NO_MERGE = 0, + ELEVATOR_FRONT_MERGE = 1, + ELEVATOR_BACK_MERGE = 2, + ELEVATOR_DISCARD_MERGE = 3, +}; + +struct blk_mq_alloc_data; +struct blk_mq_hw_ctx; + +struct elevator_mq_ops { + int (*init_sched)(struct request_queue *, struct elevator_type *); + void (*exit_sched)(struct elevator_queue *); + int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); + void (*exit_hctx)(struct blk_mq_hw_ctx *, 
unsigned int); + void (*depth_updated)(struct blk_mq_hw_ctx *); + + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); + bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *q, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); + void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *); + void (*prepare_request)(struct request *); + void (*finish_request)(struct request *); + void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); + struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); + bool (*has_work)(struct blk_mq_hw_ctx *); + void (*completed_request)(struct request *, u64); + void (*requeue_request)(struct request *); + struct request *(*former_request)(struct request_queue *, struct request *); + struct request *(*next_request)(struct request_queue *, struct request *); + void (*init_icq)(struct io_cq *); + void (*exit_icq)(struct io_cq *); +}; + + + +struct elv_fs_entry { + struct attribute attr; + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); +}; + + + + +struct elevator_type +{ + + struct kmem_cache *icq_cache; + + + struct elevator_mq_ops ops; + + size_t icq_size; + size_t icq_align; + struct elv_fs_entry *elevator_attrs; + const char *elevator_name; + const char *elevator_alias; + const unsigned int elevator_features; + struct module *elevator_owner; + + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; + + + + char icq_cache_name[(16) + 6]; + struct list_head list; +}; + + + +void elv_rqhash_del(struct request_queue *q, struct request *rq); +void elv_rqhash_add(struct request_queue *q, struct request *rq); +void elv_rqhash_reposition(struct request_queue *q, struct request *rq); +struct request *elv_rqhash_find(struct request_queue *q, sector_t offset); + + + + +struct elevator_queue +{ + struct elevator_type *type; + void *elevator_data; + struct kobject kobj; + struct mutex sysfs_lock; + unsigned int registered:1; + struct hlist_head hash[1 << (6)]; +}; + + + + +extern enum elv_merge elv_merge(struct request_queue *, struct request **, + struct bio *); +extern void elv_merge_requests(struct request_queue *, struct request *, + struct request *); +extern void elv_merged_request(struct request_queue *, struct request *, + enum elv_merge); +extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); +extern struct request *elv_former_request(struct request_queue *, struct request *); +extern struct request *elv_latter_request(struct request_queue *, struct request *); + + + + +extern int elv_register(struct elevator_type *); +extern void elv_unregister(struct elevator_type *); + + + + +extern ssize_t elv_iosched_show(struct request_queue *, char *); +extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); + +extern bool elv_bio_merge_ok(struct request *, struct bio *); +extern struct elevator_queue *elevator_alloc(struct request_queue *, + struct elevator_type *); + + + + +extern struct request *elv_rb_former_request(struct request_queue *, struct request *); +extern struct request *elv_rb_latter_request(struct request_queue *, struct request *); + + + + +extern void elv_rb_add(struct rb_root *, struct request *); +extern void 
elv_rb_del(struct rb_root *, struct request *); +extern struct request *elv_rb_find(struct rb_root *, sector_t); +# 289 "./include/linux/blkdev.h" 2 + +struct blk_queue_ctx; + +typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); + +struct bio_vec; + +enum blk_eh_timer_return { + BLK_EH_DONE, + BLK_EH_RESET_TIMER, +}; + +enum blk_queue_state { + Queue_down, + Queue_up, +}; +# 315 "./include/linux/blkdev.h" +enum blk_zoned_model { + BLK_ZONED_NONE, + BLK_ZONED_HA, + BLK_ZONED_HM, +}; + +struct queue_limits { + unsigned long bounce_pfn; + unsigned long seg_boundary_mask; + unsigned long virt_boundary_mask; + + unsigned int max_hw_sectors; + unsigned int max_dev_sectors; + unsigned int chunk_sectors; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int logical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + unsigned int max_discard_sectors; + unsigned int max_hw_discard_sectors; + unsigned int max_write_same_sectors; + unsigned int max_write_zeroes_sectors; + unsigned int max_zone_append_sectors; + unsigned int discard_granularity; + unsigned int discard_alignment; + + unsigned short max_segments; + unsigned short max_integrity_segments; + unsigned short max_discard_segments; + + unsigned char misaligned; + unsigned char discard_misaligned; + unsigned char raid_partial_stripes_expensive; + enum blk_zoned_model zoned; +}; + +typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, + void *data); + + + + +int blkdev_report_zones(struct block_device *bdev, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data); +unsigned int blkdev_nr_zones(struct gendisk *disk); +extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, + sector_t sectors, sector_t nr_sectors, + gfp_t gfp_mask); +int blk_revalidate_disk_zones(struct gendisk *disk, + void (*update_driver_data)(struct gendisk *disk)); + +extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +# 397 "./include/linux/blkdev.h" +struct request_queue { + struct request *last_merge; + struct elevator_queue *elevator; + + struct blk_queue_stats *stats; + struct rq_qos *rq_qos; + + make_request_fn *make_request_fn; + + const struct blk_mq_ops *mq_ops; + + + struct blk_mq_ctx *queue_ctx; + + unsigned int queue_depth; + + + struct blk_mq_hw_ctx **queue_hw_ctx; + unsigned int nr_hw_queues; + + struct backing_dev_info *backing_dev_info; + + + + + + void *queuedata; + + + + + unsigned long queue_flags; + + + + + + atomic_t pm_only; + + + + + + int id; + + + + + gfp_t bounce_gfp; + + spinlock_t queue_lock; + + + + + struct kobject kobj; + + + + + struct kobject *mq_kobj; + + + struct blk_integrity integrity; + + + + struct device *dev; + int rpm_status; + unsigned int nr_pending; + + + + + + unsigned long nr_requests; + + unsigned int dma_pad_mask; + unsigned int dma_alignment; + + + + struct blk_keyslot_manager *ksm; + + + unsigned int rq_timeout; + int poll_nsec; + + struct blk_stat_callback *poll_cb; + struct blk_rq_stat poll_stat[16]; + + struct timer_list timeout; + struct work_struct timeout_work; + + struct list_head icq_list; + + unsigned long blkcg_pols[(((5) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; + struct blkcg_gq *root_blkg; + struct list_head blkg_list; + + + struct queue_limits 
limits; + + unsigned int required_elevator_features; +# 520 "./include/linux/blkdev.h" + unsigned int nr_zones; + unsigned long *conv_zones_bitmap; + unsigned long *seq_zones_wlock; + + + + + + unsigned int sg_timeout; + unsigned int sg_reserved_size; + int node; + + struct blk_trace *blk_trace; + struct mutex blk_trace_mutex; + + + + + struct blk_flush_queue *fq; + + struct list_head requeue_list; + spinlock_t requeue_lock; + struct delayed_work requeue_work; + + struct mutex sysfs_lock; + struct mutex sysfs_dir_lock; + + + + + + struct list_head unused_hctx_list; + spinlock_t unused_hctx_lock; + + int mq_freeze_depth; + + + struct bsg_class_device bsg_dev; + + + + + struct throtl_data *td; + + struct callback_head callback_head; + wait_queue_head_t mq_freeze_wq; + + + + + struct mutex mq_freeze_lock; + struct percpu_ref q_usage_counter; + + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; + struct bio_set bio_split; + + + struct dentry *debugfs_dir; + struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; + + + bool mq_sysfs_init_done; + + size_t cmd_size; + + struct work_struct release_work; + + + u64 write_hints[5]; +}; +# 624 "./include/linux/blkdev.h" +void blk_queue_flag_set(unsigned int flag, struct request_queue *q); +void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); +bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); +# 663 "./include/linux/blkdev.h" +extern void blk_set_pm_only(struct request_queue *q); +extern void blk_clear_pm_only(struct request_queue *q); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_account_rq(struct request *rq) +{ + return (rq->rq_flags & (( req_flags_t)(1 << 1))) && !blk_rq_is_passthrough(rq); +} +# 682 "./include/linux/blkdev.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool queue_is_mq(struct request_queue *q) +{ + return q->mq_ops; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum blk_zoned_model +blk_queue_zoned_model(struct request_queue *q) +{ + return q->limits.zoned; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_queue_is_zoned(struct request_queue *q) +{ + switch (blk_queue_zoned_model(q)) { + case BLK_ZONED_HA: + case BLK_ZONED_HM: + return true; + default: + return false; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) sector_t blk_queue_zone_sectors(struct request_queue *q) +{ + return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_queue_nr_zones(struct request_queue *q) +{ + return blk_queue_is_zoned(q) ? q->nr_zones : 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_queue_zone_no(struct request_queue *q, + sector_t sector) +{ + if (!blk_queue_is_zoned(q)) + return 0; + return sector >> ( __builtin_constant_p(q->limits.chunk_sectors) ? ( __builtin_constant_p(q->limits.chunk_sectors) ? ( (q->limits.chunk_sectors) < 2 ? 0 : (q->limits.chunk_sectors) & (1ULL << 63) ? 63 : (q->limits.chunk_sectors) & (1ULL << 62) ? 
62 : (q->limits.chunk_sectors) & (1ULL << 61) ? 61 : (q->limits.chunk_sectors) & (1ULL << 60) ? 60 : (q->limits.chunk_sectors) & (1ULL << 59) ? 59 : (q->limits.chunk_sectors) & (1ULL << 58) ? 58 : (q->limits.chunk_sectors) & (1ULL << 57) ? 57 : (q->limits.chunk_sectors) & (1ULL << 56) ? 56 : (q->limits.chunk_sectors) & (1ULL << 55) ? 55 : (q->limits.chunk_sectors) & (1ULL << 54) ? 54 : (q->limits.chunk_sectors) & (1ULL << 53) ? 53 : (q->limits.chunk_sectors) & (1ULL << 52) ? 52 : (q->limits.chunk_sectors) & (1ULL << 51) ? 51 : (q->limits.chunk_sectors) & (1ULL << 50) ? 50 : (q->limits.chunk_sectors) & (1ULL << 49) ? 49 : (q->limits.chunk_sectors) & (1ULL << 48) ? 48 : (q->limits.chunk_sectors) & (1ULL << 47) ? 47 : (q->limits.chunk_sectors) & (1ULL << 46) ? 46 : (q->limits.chunk_sectors) & (1ULL << 45) ? 45 : (q->limits.chunk_sectors) & (1ULL << 44) ? 44 : (q->limits.chunk_sectors) & (1ULL << 43) ? 43 : (q->limits.chunk_sectors) & (1ULL << 42) ? 42 : (q->limits.chunk_sectors) & (1ULL << 41) ? 41 : (q->limits.chunk_sectors) & (1ULL << 40) ? 40 : (q->limits.chunk_sectors) & (1ULL << 39) ? 39 : (q->limits.chunk_sectors) & (1ULL << 38) ? 38 : (q->limits.chunk_sectors) & (1ULL << 37) ? 37 : (q->limits.chunk_sectors) & (1ULL << 36) ? 36 : (q->limits.chunk_sectors) & (1ULL << 35) ? 35 : (q->limits.chunk_sectors) & (1ULL << 34) ? 34 : (q->limits.chunk_sectors) & (1ULL << 33) ? 33 : (q->limits.chunk_sectors) & (1ULL << 32) ? 32 : (q->limits.chunk_sectors) & (1ULL << 31) ? 31 : (q->limits.chunk_sectors) & (1ULL << 30) ? 30 : (q->limits.chunk_sectors) & (1ULL << 29) ? 29 : (q->limits.chunk_sectors) & (1ULL << 28) ? 28 : (q->limits.chunk_sectors) & (1ULL << 27) ? 27 : (q->limits.chunk_sectors) & (1ULL << 26) ? 26 : (q->limits.chunk_sectors) & (1ULL << 25) ? 25 : (q->limits.chunk_sectors) & (1ULL << 24) ? 24 : (q->limits.chunk_sectors) & (1ULL << 23) ? 23 : (q->limits.chunk_sectors) & (1ULL << 22) ? 22 : (q->limits.chunk_sectors) & (1ULL << 21) ? 21 : (q->limits.chunk_sectors) & (1ULL << 20) ? 20 : (q->limits.chunk_sectors) & (1ULL << 19) ? 19 : (q->limits.chunk_sectors) & (1ULL << 18) ? 18 : (q->limits.chunk_sectors) & (1ULL << 17) ? 17 : (q->limits.chunk_sectors) & (1ULL << 16) ? 16 : (q->limits.chunk_sectors) & (1ULL << 15) ? 15 : (q->limits.chunk_sectors) & (1ULL << 14) ? 14 : (q->limits.chunk_sectors) & (1ULL << 13) ? 13 : (q->limits.chunk_sectors) & (1ULL << 12) ? 12 : (q->limits.chunk_sectors) & (1ULL << 11) ? 11 : (q->limits.chunk_sectors) & (1ULL << 10) ? 10 : (q->limits.chunk_sectors) & (1ULL << 9) ? 9 : (q->limits.chunk_sectors) & (1ULL << 8) ? 8 : (q->limits.chunk_sectors) & (1ULL << 7) ? 7 : (q->limits.chunk_sectors) & (1ULL << 6) ? 6 : (q->limits.chunk_sectors) & (1ULL << 5) ? 5 : (q->limits.chunk_sectors) & (1ULL << 4) ? 4 : (q->limits.chunk_sectors) & (1ULL << 3) ? 3 : (q->limits.chunk_sectors) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(q->limits.chunk_sectors) <= 4) ? 
__ilog2_u32(q->limits.chunk_sectors) : __ilog2_u64(q->limits.chunk_sectors) ); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_queue_zone_is_seq(struct request_queue *q, + sector_t sector) +{ + if (!blk_queue_is_zoned(q)) + return false; + if (!q->conv_zones_bitmap) + return true; + return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap); +} +# 749 "./include/linux/blkdev.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rq_is_sync(struct request *rq) +{ + return op_is_sync(rq->cmd_flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rq_mergeable(struct request *rq) +{ + if (blk_rq_is_passthrough(rq)) + return false; + + if (((rq)->cmd_flags & ((1 << 8) - 1)) == REQ_OP_FLUSH) + return false; + + if (((rq)->cmd_flags & ((1 << 8) - 1)) == REQ_OP_WRITE_ZEROES) + return false; + + if (((rq)->cmd_flags & ((1 << 8) - 1)) == REQ_OP_ZONE_APPEND) + return false; + + if (rq->cmd_flags & ((1ULL << __REQ_NOMERGE) | (1ULL << __REQ_PREFLUSH) | (1ULL << __REQ_FUA))) + return false; + if (rq->rq_flags & ((( req_flags_t)(1 << 1)) | (( req_flags_t)(1 << 3)) | (( req_flags_t)(1 << 4)) | (( req_flags_t)(1 << 18)))) + return false; + + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_write_same_mergeable(struct bio *a, struct bio *b) +{ + if ((((&(((((a))->bi_io_vec)))[(((((a)->bi_iter)))).bi_idx])->bv_page) + (((&((((((a))->bi_io_vec))))[((((((a)->bi_iter))))).bi_idx])->bv_offset + (((((a)->bi_iter)))).bi_bvec_done) / ((1UL) << 12))) == (((&(((((b))->bi_io_vec)))[(((((b)->bi_iter)))).bi_idx])->bv_page) + (((&((((((b))->bi_io_vec))))[((((((b)->bi_iter))))).bi_idx])->bv_offset + (((((b)->bi_iter)))).bi_bvec_done) / ((1UL) << 12))) && + (((&(((((a))->bi_io_vec)))[(((((a)->bi_iter)))).bi_idx])->bv_offset + ((((a)->bi_iter))).bi_bvec_done) % ((1UL) << 12)) == (((&(((((b))->bi_io_vec)))[(((((b)->bi_iter)))).bi_idx])->bv_offset + ((((b)->bi_iter))).bi_bvec_done) % ((1UL) << 12))) + return true; + + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_queue_depth(struct request_queue *q) +{ + if (q->queue_depth) + return q->queue_depth; + + return q->nr_requests; +} + +extern unsigned long blk_max_low_pfn, blk_max_pfn; +# 817 "./include/linux/blkdev.h" +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + unsigned long offset; + int null_mapped; + int from_user; +}; + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; +# 856 "./include/linux/blkdev.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rq_flush_dcache_pages(struct request *rq) +{ +} + + +extern int blk_register_queue(struct gendisk *disk); +extern void blk_unregister_queue(struct gendisk *disk); +extern blk_qc_t generic_make_request(struct bio *bio); +extern blk_qc_t direct_make_request(struct bio *bio); +extern void blk_rq_init(struct request_queue *q, struct request *rq); +extern void blk_put_request(struct request *); +extern struct request *blk_get_request(struct request_queue *, unsigned int op, + blk_mq_req_flags_t flags); +extern int blk_lld_busy(struct request_queue *q); +extern int 
blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), + void *data); +extern void blk_rq_unprep_clone(struct request *rq); +extern blk_status_t blk_insert_cloned_request(struct request_queue *q, + struct request *rq); +extern int blk_rq_append_bio(struct request *rq, struct bio **bio); +extern void blk_queue_split(struct request_queue *, struct bio **); +extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); +extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, + unsigned int, void *); +extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, + unsigned int, void *); +extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, + struct scsi_ioctl_command *); +extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void *argp); +extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void *argp); + +extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); +extern void blk_queue_exit(struct request_queue *q); +extern void blk_sync_queue(struct request_queue *q); +extern int blk_rq_map_user(struct request_queue *, struct request *, + struct rq_map_data *, void *, unsigned long, + gfp_t); +extern int blk_rq_unmap_user(struct bio *); +extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); +extern int blk_rq_map_user_iov(struct request_queue *, struct request *, + struct rq_map_data *, const struct iov_iter *, + gfp_t); +extern void blk_execute_rq(struct request_queue *, struct gendisk *, + struct request *, int); +extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, + struct request *, int, rq_end_io_fn *); + + +extern const char *blk_op_str(unsigned int op); + +int blk_status_to_errno(blk_status_t status); +blk_status_t errno_to_blk_status(int errno); + +int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct request_queue *bdev_get_queue(struct block_device *bdev) +{ + return bdev->bd_disk->queue; +} +# 940 "./include/linux/blkdev.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) sector_t blk_rq_pos(const struct request *rq) +{ + return rq->__sector; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_bytes(const struct request *rq) +{ + return rq->__data_len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int blk_rq_cur_bytes(const struct request *rq) +{ + return rq->bio ? 
bio_cur_bytes(rq->bio) : 0; +} + +extern unsigned int blk_rq_err_bytes(const struct request *rq); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_sectors(const struct request *rq) +{ + return blk_rq_bytes(rq) >> 9; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_cur_sectors(const struct request *rq) +{ + return blk_rq_cur_bytes(rq) >> 9; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_stats_sectors(const struct request *rq) +{ + return rq->stats_sectors; +} + + + + +const char *blk_zone_cond_str(enum blk_zone_cond zone_cond); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_zone_no(struct request *rq) +{ + return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_zone_is_seq(struct request *rq) +{ + return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); +} +# 994 "./include/linux/blkdev.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_payload_bytes(struct request *rq) +{ + if (rq->rq_flags & (( req_flags_t)(1 << 18))) + return rq->special_vec.bv_len; + return blk_rq_bytes(rq); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio_vec req_bvec(struct request *rq) +{ + if (rq->rq_flags & (( req_flags_t)(1 << 18))) + return rq->special_vec; + return ((struct bio_vec) { .bv_page = ((&(((rq->bio->bi_io_vec)))[(((rq->bio->bi_iter))).bi_idx])->bv_page), .bv_len = __builtin_choose_expr(((!!(sizeof((typeof(((rq->bio->bi_iter)).bi_size) *)1 == (typeof((&(((rq->bio->bi_io_vec)))[(((rq->bio->bi_iter))).bi_idx])->bv_len - ((rq->bio->bi_iter)).bi_bvec_done) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(((rq->bio->bi_iter)).bi_size) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((&(((rq->bio->bi_io_vec)))[(((rq->bio->bi_iter))).bi_idx])->bv_len - ((rq->bio->bi_iter)).bi_bvec_done) * 0l)) : (int *)8))))), ((((rq->bio->bi_iter)).bi_size) < ((&(((rq->bio->bi_io_vec)))[(((rq->bio->bi_iter))).bi_idx])->bv_len - ((rq->bio->bi_iter)).bi_bvec_done) ? (((rq->bio->bi_iter)).bi_size) : ((&(((rq->bio->bi_io_vec)))[(((rq->bio->bi_iter))).bi_idx])->bv_len - ((rq->bio->bi_iter)).bi_bvec_done)), ({ typeof(((rq->bio->bi_iter)).bi_size) __UNIQUE_ID___x1104 = (((rq->bio->bi_iter)).bi_size); typeof((&(((rq->bio->bi_io_vec)))[(((rq->bio->bi_iter))).bi_idx])->bv_len - ((rq->bio->bi_iter)).bi_bvec_done) __UNIQUE_ID___y1105 = ((&(((rq->bio->bi_io_vec)))[(((rq->bio->bi_iter))).bi_idx])->bv_len - ((rq->bio->bi_iter)).bi_bvec_done); ((__UNIQUE_ID___x1104) < (__UNIQUE_ID___y1105) ? 
(__UNIQUE_ID___x1104) : (__UNIQUE_ID___y1105)); })), .bv_offset = ((&(((rq->bio->bi_io_vec)))[(((rq->bio->bi_iter))).bi_idx])->bv_offset + ((rq->bio->bi_iter)).bi_bvec_done), }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_queue_get_max_sectors(struct request_queue *q, + int op) +{ + if (__builtin_expect(!!(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE), 0)) + return __builtin_choose_expr(((!!(sizeof((typeof(q->limits.max_discard_sectors) *)1 == (typeof((~0U) >> 9) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(q->limits.max_discard_sectors) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((~0U) >> 9) * 0l)) : (int *)8))))), ((q->limits.max_discard_sectors) < ((~0U) >> 9) ? (q->limits.max_discard_sectors) : ((~0U) >> 9)), ({ typeof(q->limits.max_discard_sectors) __UNIQUE_ID___x1106 = (q->limits.max_discard_sectors); typeof((~0U) >> 9) __UNIQUE_ID___y1107 = ((~0U) >> 9); ((__UNIQUE_ID___x1106) < (__UNIQUE_ID___y1107) ? (__UNIQUE_ID___x1106) : (__UNIQUE_ID___y1107)); })) + ; + + if (__builtin_expect(!!(op == REQ_OP_WRITE_SAME), 0)) + return q->limits.max_write_same_sectors; + + if (__builtin_expect(!!(op == REQ_OP_WRITE_ZEROES), 0)) + return q->limits.max_write_zeroes_sectors; + + return q->limits.max_sectors; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_max_size_offset(struct request_queue *q, + sector_t offset) +{ + if (!q->limits.chunk_sectors) + return q->limits.max_sectors; + + return __builtin_choose_expr(((!!(sizeof((typeof(q->limits.max_sectors) *)1 == (typeof((unsigned int)(q->limits.chunk_sectors - (offset & (q->limits.chunk_sectors - 1)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(q->limits.max_sectors) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned int)(q->limits.chunk_sectors - (offset & (q->limits.chunk_sectors - 1)))) * 0l)) : (int *)8))))), ((q->limits.max_sectors) < ((unsigned int)(q->limits.chunk_sectors - (offset & (q->limits.chunk_sectors - 1)))) ? (q->limits.max_sectors) : ((unsigned int)(q->limits.chunk_sectors - (offset & (q->limits.chunk_sectors - 1))))), ({ typeof(q->limits.max_sectors) __UNIQUE_ID___x1108 = (q->limits.max_sectors); typeof((unsigned int)(q->limits.chunk_sectors - (offset & (q->limits.chunk_sectors - 1)))) __UNIQUE_ID___y1109 = ((unsigned int)(q->limits.chunk_sectors - (offset & (q->limits.chunk_sectors - 1)))); ((__UNIQUE_ID___x1108) < (__UNIQUE_ID___y1109) ? (__UNIQUE_ID___x1108) : (__UNIQUE_ID___y1109)); })) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_get_max_sectors(struct request *rq, + sector_t offset) +{ + struct request_queue *q = rq->q; + + if (blk_rq_is_passthrough(rq)) + return q->limits.max_hw_sectors; + + if (!q->limits.chunk_sectors || + ((rq)->cmd_flags & ((1 << 8) - 1)) == REQ_OP_DISCARD || + ((rq)->cmd_flags & ((1 << 8) - 1)) == REQ_OP_SECURE_ERASE) + return blk_queue_get_max_sectors(q, ((rq)->cmd_flags & ((1 << 8) - 1))); + + return __builtin_choose_expr(((!!(sizeof((typeof(blk_max_size_offset(q, offset)) *)1 == (typeof(blk_queue_get_max_sectors(q, ((rq)->cmd_flags & ((1 << 8) - 1)))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(blk_max_size_offset(q, offset)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? 
((void *)((long)(blk_queue_get_max_sectors(q, ((rq)->cmd_flags & ((1 << 8) - 1)))) * 0l)) : (int *)8))))), ((blk_max_size_offset(q, offset)) < (blk_queue_get_max_sectors(q, ((rq)->cmd_flags & ((1 << 8) - 1)))) ? (blk_max_size_offset(q, offset)) : (blk_queue_get_max_sectors(q, ((rq)->cmd_flags & ((1 << 8) - 1))))), ({ typeof(blk_max_size_offset(q, offset)) __UNIQUE_ID___x1110 = (blk_max_size_offset(q, offset)); typeof(blk_queue_get_max_sectors(q, ((rq)->cmd_flags & ((1 << 8) - 1)))) __UNIQUE_ID___y1111 = (blk_queue_get_max_sectors(q, ((rq)->cmd_flags & ((1 << 8) - 1)))); ((__UNIQUE_ID___x1110) < (__UNIQUE_ID___y1111) ? (__UNIQUE_ID___x1110) : (__UNIQUE_ID___y1111)); })) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blk_rq_count_bios(struct request *rq) +{ + unsigned int nr_bios = 0; + struct bio *bio; + + if ((rq->bio)) for (bio = (rq)->bio; bio; bio = bio->bi_next) + nr_bios++; + + return nr_bios; +} + +void blk_steal_bios(struct bio_list *list, struct request *rq); + + + + + + + +extern bool blk_update_request(struct request *rq, blk_status_t error, + unsigned int nr_bytes); + +extern void __blk_complete_request(struct request *); +extern void blk_abort_request(struct request *); + + + + +extern void blk_cleanup_queue(struct request_queue *); +extern void blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); +extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); +extern void blk_queue_max_segments(struct request_queue *, unsigned short); +extern void blk_queue_max_discard_segments(struct request_queue *, + unsigned short); +extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); +extern void blk_queue_max_discard_sectors(struct request_queue *q, + unsigned int max_discard_sectors); +extern void blk_queue_max_write_same_sectors(struct request_queue *q, + unsigned int max_write_same_sectors); +extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, + unsigned int max_write_same_sectors); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_max_zone_append_sectors(struct request_queue *q, + unsigned int max_zone_append_sectors); +extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_alignment_offset(struct request_queue *q, + unsigned int alignment); +extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); +extern void blk_queue_io_min(struct request_queue *q, unsigned int min); +extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); +extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); +extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); +extern void blk_set_default_limits(struct queue_limits *lim); +extern void blk_set_stacking_limits(struct queue_limits *lim); +extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t offset); +extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, + sector_t offset); +extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset); +extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); +extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); +extern void blk_queue_segment_boundary(struct request_queue *, 
unsigned long); +extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); +extern void blk_queue_dma_alignment(struct request_queue *, int); +extern void blk_queue_update_dma_alignment(struct request_queue *, int); +extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); +extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); +extern void blk_queue_required_elevator_features(struct request_queue *q, + unsigned int features); +extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, + struct device *dev); +# 1142 "./include/linux/blkdev.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short blk_rq_nr_phys_segments(struct request *rq) +{ + if (rq->rq_flags & (( req_flags_t)(1 << 18))) + return 1; + return rq->nr_phys_segments; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short blk_rq_nr_discard_segments(struct request *rq) +{ + return __builtin_choose_expr(((!!(sizeof((typeof((unsigned short)(rq->nr_phys_segments)) *)1 == (typeof((unsigned short)(1)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned short)(rq->nr_phys_segments)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned short)(1)) * 0l)) : (int *)8))))), (((unsigned short)(rq->nr_phys_segments)) > ((unsigned short)(1)) ? ((unsigned short)(rq->nr_phys_segments)) : ((unsigned short)(1))), ({ typeof((unsigned short)(rq->nr_phys_segments)) __UNIQUE_ID___x1112 = ((unsigned short)(rq->nr_phys_segments)); typeof((unsigned short)(1)) __UNIQUE_ID___y1113 = ((unsigned short)(1)); ((__UNIQUE_ID___x1112) > (__UNIQUE_ID___y1113) ? 
(__UNIQUE_ID___x1112) : (__UNIQUE_ID___y1113)); })); +} + +int __blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist, struct scatterlist **last_sg); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct scatterlist *last_sg = ((void *)0); + + return __blk_rq_map_sg(q, rq, sglist, &last_sg); +} +extern void blk_dump_rq_flags(struct request *, char *); +extern long nr_blockdev_pages(void); + +bool __attribute__((__warn_unused_result__)) blk_get_queue(struct request_queue *); +struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id); +extern void blk_put_queue(struct request_queue *); +extern void blk_set_queue_dying(struct request_queue *); +# 1187 "./include/linux/blkdev.h" +struct blk_plug { + struct list_head mq_list; + struct list_head cb_list; + unsigned short rq_count; + bool multiple_queues; + bool nowait; +}; + + + +struct blk_plug_cb; +typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); +struct blk_plug_cb { + struct list_head list; + blk_plug_cb_fn callback; + void *data; +}; +extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, + void *data, int size); +extern void blk_start_plug(struct blk_plug *); +extern void blk_finish_plug(struct blk_plug *); +extern void blk_flush_plug_list(struct blk_plug *, bool); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blk_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blk_schedule_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + if (plug) + blk_flush_plug_list(plug, true); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_needs_flush_plug(struct task_struct *tsk) +{ + struct blk_plug *plug = tsk->plug; + + return plug && + (!list_empty(&plug->mq_list) || + !list_empty(&plug->cb_list)); +} + +extern void blk_io_schedule(void); + +int blkdev_issue_flush(struct block_device *, gfp_t); +extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct page *page); + + + +extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); +extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, int flags, + struct bio **biop); + + + + +extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, + unsigned flags); +extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned flags); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sb_issue_discard(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) +{ + return blkdev_issue_discard(sb->s_bdev, + block << (sb->s_blocksize_bits - + 9), + nr_blocks << (sb->s_blocksize_bits - + 9), + gfp_mask, flags); +} +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) int sb_issue_zeroout(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask) +{ + return blkdev_issue_zeroout(sb->s_bdev, + block << (sb->s_blocksize_bits - + 9), + nr_blocks << (sb->s_blocksize_bits - + 9), + gfp_mask, 0); +} + +extern int blk_verify_command(unsigned char *cmd, fmode_t mode); + +enum blk_default_limits { + BLK_MAX_SEGMENTS = 128, + BLK_SAFE_MAX_SECTORS = 255, + BLK_DEF_MAX_SECTORS = 2560, + BLK_MAX_SEGMENT_SIZE = 65536, + BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long queue_segment_boundary(const struct request_queue *q) +{ + return q->limits.seg_boundary_mask; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long queue_virt_boundary(const struct request_queue *q) +{ + return q->limits.virt_boundary_mask; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int queue_max_sectors(const struct request_queue *q) +{ + return q->limits.max_sectors; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int queue_max_hw_sectors(const struct request_queue *q) +{ + return q->limits.max_hw_sectors; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short queue_max_segments(const struct request_queue *q) +{ + return q->limits.max_segments; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short queue_max_discard_segments(const struct request_queue *q) +{ + return q->limits.max_discard_segments; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int queue_max_segment_size(const struct request_queue *q) +{ + return q->limits.max_segment_size; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int queue_max_zone_append_sectors(const struct request_queue *q) +{ + return q->limits.max_zone_append_sectors; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned queue_logical_block_size(const struct request_queue *q) +{ + int retval = 512; + + if (q && q->limits.logical_block_size) + retval = q->limits.logical_block_size; + + return retval; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int bdev_logical_block_size(struct block_device *bdev) +{ + return queue_logical_block_size(bdev_get_queue(bdev)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int queue_physical_block_size(const struct request_queue *q) +{ + return q->limits.physical_block_size; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int bdev_physical_block_size(struct block_device *bdev) +{ + return queue_physical_block_size(bdev_get_queue(bdev)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
unsigned int queue_io_min(const struct request_queue *q) +{ + return q->limits.io_min; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bdev_io_min(struct block_device *bdev) +{ + return queue_io_min(bdev_get_queue(bdev)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int queue_io_opt(const struct request_queue *q) +{ + return q->limits.io_opt; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bdev_io_opt(struct block_device *bdev) +{ + return queue_io_opt(bdev_get_queue(bdev)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int queue_alignment_offset(const struct request_queue *q) +{ + if (q->limits.misaligned) + return -1; + + return q->limits.alignment_offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) +{ + unsigned int granularity = __builtin_choose_expr(((!!(sizeof((typeof(lim->physical_block_size) *)1 == (typeof(lim->io_min) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(lim->physical_block_size) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(lim->io_min) * 0l)) : (int *)8))))), ((lim->physical_block_size) > (lim->io_min) ? (lim->physical_block_size) : (lim->io_min)), ({ typeof(lim->physical_block_size) __UNIQUE_ID___x1114 = (lim->physical_block_size); typeof(lim->io_min) __UNIQUE_ID___y1115 = (lim->io_min); ((__UNIQUE_ID___x1114) > (__UNIQUE_ID___y1115) ? (__UNIQUE_ID___x1114) : (__UNIQUE_ID___y1115)); })); + unsigned int alignment = ({ uint32_t __base = (granularity >> 9); uint32_t __rem; __rem = ((uint64_t)(sector)) % __base; (sector) = ((uint64_t)(sector)) / __base; __rem; }) + << 9; + + return (granularity + lim->alignment_offset - alignment) % granularity; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bdev_alignment_offset(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q->limits.misaligned) + return -1; + + if (bdev != bdev->bd_contains) + return bdev->bd_part->alignment_offset; + + return q->limits.alignment_offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int queue_discard_alignment(const struct request_queue *q) +{ + if (q->limits.discard_misaligned) + return -1; + + return q->limits.discard_alignment; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) +{ + unsigned int alignment, granularity, offset; + + if (!lim->max_discard_sectors) + return 0; + + + alignment = lim->discard_alignment >> 9; + granularity = lim->discard_granularity >> 9; + if (!granularity) + return 0; + + + offset = ({ uint32_t __base = (granularity); uint32_t __rem; __rem = ((uint64_t)(sector)) % __base; (sector) = ((uint64_t)(sector)) / __base; __rem; }); + + + offset = (granularity + alignment - offset) % granularity; + + + return offset << 9; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int 
bdev_discard_alignment(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (bdev != bdev->bd_contains) + return bdev->bd_part->discard_alignment; + + return q->limits.discard_alignment; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int bdev_write_same(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return q->limits.max_write_same_sectors; + + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return q->limits.max_write_zeroes_sectors; + + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum blk_zoned_model bdev_zoned_model(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zoned_model(q); + + return BLK_ZONED_NONE; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bdev_is_zoned(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_is_zoned(q); + + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) sector_t bdev_zone_sectors(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return blk_queue_zone_sectors(q); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int queue_dma_alignment(const struct request_queue *q) +{ + return q ? 
q->dma_alignment : 511; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int blk_rq_aligned(struct request_queue *q, unsigned long addr, + unsigned int len) +{ + unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; + return !(addr & alignment) && !(len & alignment); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blksize_bits(unsigned int size) +{ + unsigned int bits = 8; + do { + bits++; + size >>= 1; + } while (size > 256); + return bits; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int block_size(struct block_device *bdev) +{ + return bdev->bd_block_size; +} + +int kblockd_schedule_work(struct work_struct *work); +int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); +# 1532 "./include/linux/blkdev.h" +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1 << 0, + BLK_INTEGRITY_GENERATE = 1 << 1, + BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, + BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, +}; + +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + unsigned short interval; + const char *disk_name; +}; + +typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); +typedef void (integrity_prepare_fn) (struct request *); +typedef void (integrity_complete_fn) (struct request *, unsigned int); + +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + integrity_prepare_fn *prepare_fn; + integrity_complete_fn *complete_fn; + const char *name; +}; + +extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); +extern void blk_integrity_unregister(struct gendisk *); +extern int blk_integrity_compare(struct gendisk *, struct gendisk *); +extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, + struct scatterlist *); +extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); +extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, + struct request *); +extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, + struct bio *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return ((void *)0); + + return bi; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +{ + return blk_get_integrity(bdev->bd_disk); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return q->integrity.profile; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_integrity_rq(struct request *rq) +{ + return rq->cmd_flags & (1ULL << __REQ_INTEGRITY); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ + 
q->limits.max_integrity_segments = segs; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short +queue_max_integrity_segments(const struct request_queue *q) +{ + return q->limits.max_integrity_segments; +} +# 1620 "./include/linux/blkdev.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return sectors >> (bi->interval_exp - 9); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return bio_integrity_intervals(bi, sectors) * bi->tuple_size; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bio_vec *rq_integrity_vec(struct request *rq) +{ + if (({ int __ret_warn_on = !!(queue_max_integrity_segments(rq->q) > 1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1116)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/blkdev.h"), "i" (1638), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1117)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1118)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return ((void *)0); + return rq->bio->bi_integrity->bip_vec; +} +# 1731 "./include/linux/blkdev.h" +bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q); + +void blk_ksm_unregister(struct request_queue *q); +# 1748 "./include/linux/blkdev.h" +struct block_device_operations { + int (*open) (struct block_device *, fmode_t); + void (*release) (struct gendisk *, fmode_t); + int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); + int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); + unsigned int (*check_events) (struct gendisk *disk, + unsigned int clearing); + + int (*media_changed) (struct gendisk *); + void (*unlock_native_capacity) (struct gendisk *); + int (*revalidate_disk) (struct gendisk *); + int (*getgeo)(struct block_device *, struct hd_geometry *); + + void (*swap_slot_free_notify) (struct block_device *, unsigned long); + int (*report_zones)(struct gendisk *, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data); + char *(*devnode)(struct gendisk *disk, umode_t *mode); + struct module *owner; + const struct pr_ops *pr_ops; +}; + + +extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t, + unsigned int, unsigned long); + + + + +extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, + unsigned long); +extern int bdev_read_page(struct block_device *, sector_t, struct page *); +extern int 
bdev_write_page(struct block_device *, sector_t, struct page *, + struct writeback_control *); + + +bool blk_req_needs_zone_write_lock(struct request *rq); +bool blk_req_zone_write_trylock(struct request *rq); +void __blk_req_zone_write_lock(struct request *rq); +void __blk_req_zone_write_unlock(struct request *rq); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blk_req_zone_write_lock(struct request *rq) +{ + if (blk_req_needs_zone_write_lock(rq)) + __blk_req_zone_write_lock(rq); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blk_req_zone_write_unlock(struct request *rq) +{ + if (rq->rq_flags & (( req_flags_t)(1 << 19))) + __blk_req_zone_write_unlock(rq); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_req_zone_is_write_locked(struct request *rq) +{ + return rq->q->seq_zones_wlock && + test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + if (!blk_req_needs_zone_write_lock(rq)) + return true; + return !blk_req_zone_is_write_locked(rq); +} +# 1883 "./include/linux/blkdev.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blk_wake_io_task(struct task_struct *waiter) +{ + + + + + + if (waiter == get_current()) + do { ({ int __ret_warn_on = !!(((0x0000) & (0x0004 | 0x0008 | 0x0040 | 0x0080))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1119)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/blkdev.h"), "i" (1891), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1120)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1121)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); get_current()->task_state_change = ({ __label__ __here; __here: (unsigned long)&&__here; }); get_current()->state = (0x0000); } while (0); + else + wake_up_process(waiter); +} + + +unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, + unsigned int op); +void disk_end_io_acct(struct gendisk *disk, unsigned int op, + unsigned long start_time); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long bio_start_io_acct(struct bio *bio) +{ + return disk_start_io_acct(bio->bi_disk, (((bio)->bi_iter).bi_size >> 9), ((bio)->bi_opf & ((1 << 8) - 1))); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bio_end_io_acct(struct bio *bio, unsigned long start_time) +{ + return disk_end_io_acct(bio->bi_disk, ((bio)->bi_opf & ((1 << 8) - 1)), 
start_time); +} +# 62 "fs/io_uring.c" 2 + +# 1 "./include/linux/net.h" 1 +# 18 "./include/linux/net.h" +# 1 "./include/linux/random.h" 1 +# 13 "./include/linux/random.h" +# 1 "./include/linux/once.h" 1 + + + + + + + +bool __do_once_start(bool *done, unsigned long *flags); +void __do_once_done(bool *done, struct static_key_true *once_key, + unsigned long *flags); +# 14 "./include/linux/random.h" 2 + +# 1 "./include/uapi/linux/random.h" 1 +# 41 "./include/uapi/linux/random.h" +struct rand_pool_info { + int entropy_count; + int buf_size; + __u32 buf[0]; +}; +# 16 "./include/linux/random.h" 2 + +struct random_ready_callback { + struct list_head list; + void (*func)(struct random_ready_callback *rdy); + struct module *owner; +}; + +extern void add_device_randomness(const void *, unsigned int); +extern void add_bootloader_randomness(const void *, unsigned int); +# 33 "./include/linux/random.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void add_latent_entropy(void) {} + + +extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value) ; +extern void add_interrupt_randomness(int irq, int irq_flags) ; + +extern void get_random_bytes(void *buf, int nbytes); +extern int wait_for_random_bytes(void); +extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) rand_initialize(void); +extern bool rng_is_initialized(void); +extern int add_random_ready_callback(struct random_ready_callback *rdy); +extern void del_random_ready_callback(struct random_ready_callback *rdy); +extern int __attribute__((__warn_unused_result__)) get_random_bytes_arch(void *buf, int nbytes); + + +extern const struct file_operations random_fops, urandom_fops; + + +u32 get_random_u32(void); +u64 get_random_u64(void); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int get_random_int(void) +{ + return get_random_u32(); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_random_long(void) +{ + + return get_random_u64(); + + + +} +# 81 "./include/linux/random.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_random_canary(void) +{ + unsigned long val = get_random_long(); + + return val & 0xffffffffffffff00UL; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_random_bytes_wait(void *buf, int nbytes) +{ + int ret = wait_for_random_bytes(); + get_random_bytes(buf, nbytes); + return ret; +} +# 105 "./include/linux/random.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_random_u32_wait(u32 *out) { int ret = wait_for_random_bytes(); if (__builtin_expect(!!(ret), 0)) return ret; *out = get_random_u32(); return 0; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_random_u64_wait(u64 *out) { int ret = wait_for_random_bytes(); if (__builtin_expect(!!(ret), 0)) return ret; *out = get_random_u64(); return 0; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_random_int_wait(int *out) { int ret = wait_for_random_bytes(); if 
(__builtin_expect(!!(ret), 0)) return ret; *out = get_random_int(); return 0; } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int get_random_long_wait(long *out) { int ret = wait_for_random_bytes(); if (__builtin_expect(!!(ret), 0)) return ret; *out = get_random_long(); return 0; } + + +unsigned long randomize_page(unsigned long start, unsigned long range); + +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state *pcpu_state); +# 141 "./include/linux/random.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} + + +# 1 "./arch/x86/include/asm/archrandom.h" 1 +# 20 "./arch/x86/include/asm/archrandom.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) rdrand_long(unsigned long *v) +{ + bool ok; + unsigned int retry = 10; + do { + asm volatile("rdrand %[out]" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (ok), [out] "=r" (*v)); + if (ok) + return true; + } while (--retry); + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) rdrand_int(unsigned int *v) +{ + bool ok; + unsigned int retry = 10; + do { + asm volatile("rdrand %[out]" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (ok), [out] "=r" (*v)); + if (ok) + return true; + } while (--retry); + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) rdseed_long(unsigned long *v) +{ + bool ok; + asm volatile("rdseed %[out]" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (ok), [out] "=r" (*v)); + return ok; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) rdseed_int(unsigned int *v) +{ + bool ok; + asm volatile("rdseed %[out]" + "\n\t/* output condition code " "c" "*/\n" + : "=@cc" "c" (ok), [out] "=r" (*v)); + return ok; +} +# 73 "./arch/x86/include/asm/archrandom.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) arch_get_random_long(unsigned long *v) +{ + return ( __builtin_constant_p((__builtin_constant_p(( 4*32+30)) && ( (((( 4*32+30))>>5)==(0) && (1UL<<((( 
4*32+30))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+30))>>5)==(1) && (1UL<<((( 4*32+30))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+30))>>5)==(2) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(3) && (1UL<<((( 4*32+30))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+30))>>5)==(4) && (1UL<<((( 4*32+30))&31) & (0) )) || (((( 4*32+30))>>5)==(5) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(6) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(7) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(8) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(9) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(10) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(11) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(12) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(13) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(14) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(15) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(16) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(17) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(18) && (1UL<<((( 4*32+30))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 4*32+30), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p(( 4*32+30)) && ( (((( 4*32+30))>>5)==(0) && (1UL<<((( 4*32+30))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+30))>>5)==(1) && (1UL<<((( 4*32+30))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+30))>>5)==(2) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(3) && (1UL<<((( 4*32+30))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+30))>>5)==(4) && (1UL<<((( 4*32+30))&31) & (0) )) || (((( 4*32+30))>>5)==(5) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(6) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(7) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(8) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(9) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(10) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(11) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(12) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(13) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(14) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(15) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(16) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(17) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(18) && (1UL<<((( 4*32+30))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 4*32+30), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has(( 4*32+30)) ) ? 
rdrand_long(v) : false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) arch_get_random_int(unsigned int *v) +{ + return ( __builtin_constant_p((__builtin_constant_p(( 4*32+30)) && ( (((( 4*32+30))>>5)==(0) && (1UL<<((( 4*32+30))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+30))>>5)==(1) && (1UL<<((( 4*32+30))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+30))>>5)==(2) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(3) && (1UL<<((( 4*32+30))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+30))>>5)==(4) && (1UL<<((( 4*32+30))&31) & (0) )) || (((( 4*32+30))>>5)==(5) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(6) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(7) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(8) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(9) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(10) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(11) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(12) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(13) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(14) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(15) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(16) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(17) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(18) && (1UL<<((( 4*32+30))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 4*32+30), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p(( 4*32+30)) && ( (((( 4*32+30))>>5)==(0) && (1UL<<((( 4*32+30))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 4*32+30))>>5)==(1) && (1UL<<((( 4*32+30))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 4*32+30))>>5)==(2) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(3) && (1UL<<((( 4*32+30))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 4*32+30))>>5)==(4) && (1UL<<((( 4*32+30))&31) & (0) )) || (((( 4*32+30))>>5)==(5) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(6) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(7) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(8) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(9) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(10) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(11) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(12) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(13) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(14) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(15) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(16) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(17) && (1UL<<((( 4*32+30))&31) & 0 )) || (((( 4*32+30))>>5)==(18) && (1UL<<((( 4*32+30))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit(( 4*32+30), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has(( 4*32+30)) ) ? rdrand_int(v) : false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) arch_get_random_seed_long(unsigned long *v) +{ + return ( __builtin_constant_p((__builtin_constant_p(( 9*32+18)) && ( (((( 9*32+18))>>5)==(0) && (1UL<<((( 9*32+18))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 9*32+18))>>5)==(1) && (1UL<<((( 9*32+18))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 9*32+18))>>5)==(2) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(3) && (1UL<<((( 9*32+18))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 9*32+18))>>5)==(4) && (1UL<<((( 9*32+18))&31) & (0) )) || (((( 9*32+18))>>5)==(5) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(6) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(7) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(8) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(9) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(10) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(11) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(12) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(13) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(14) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(15) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(16) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(17) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(18) && (1UL<<((( 9*32+18))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 9*32+18), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? 
(__builtin_constant_p(( 9*32+18)) && ( (((( 9*32+18))>>5)==(0) && (1UL<<((( 9*32+18))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 9*32+18))>>5)==(1) && (1UL<<((( 9*32+18))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 9*32+18))>>5)==(2) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(3) && (1UL<<((( 9*32+18))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 9*32+18))>>5)==(4) && (1UL<<((( 9*32+18))&31) & (0) )) || (((( 9*32+18))>>5)==(5) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(6) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(7) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(8) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(9) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(10) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(11) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(12) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(13) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(14) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(15) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(16) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(17) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(18) && (1UL<<((( 9*32+18))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 9*32+18), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has(( 9*32+18)) ) ? rdseed_long(v) : false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__warn_unused_result__)) arch_get_random_seed_int(unsigned int *v) +{ + return ( __builtin_constant_p((__builtin_constant_p(( 9*32+18)) && ( (((( 9*32+18))>>5)==(0) && (1UL<<((( 9*32+18))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 9*32+18))>>5)==(1) && (1UL<<((( 9*32+18))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 9*32+18))>>5)==(2) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(3) && (1UL<<((( 9*32+18))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 9*32+18))>>5)==(4) && (1UL<<((( 9*32+18))&31) & (0) )) || (((( 9*32+18))>>5)==(5) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(6) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(7) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(8) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(9) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(10) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(11) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(12) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(13) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(14) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(15) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(16) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(17) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(18) && (1UL<<((( 9*32+18))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 
1 : test_bit(( 9*32+18), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p(( 9*32+18)) && ( (((( 9*32+18))>>5)==(0) && (1UL<<((( 9*32+18))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 9*32+18))>>5)==(1) && (1UL<<((( 9*32+18))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 9*32+18))>>5)==(2) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(3) && (1UL<<((( 9*32+18))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 9*32+18))>>5)==(4) && (1UL<<((( 9*32+18))&31) & (0) )) || (((( 9*32+18))>>5)==(5) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(6) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(7) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(8) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(9) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(10) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(11) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(12) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(13) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(14) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(15) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(16) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(17) && (1UL<<((( 9*32+18))&31) & 0 )) || (((( 9*32+18))>>5)==(18) && (1UL<<((( 9*32+18))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 9*32+18), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has(( 9*32+18)) ) ? rdseed_int(v) : false; +} + +extern void x86_init_rdrand(struct cpuinfo_x86 *c); +# 171 "./include/linux/random.h" 2 +# 195 "./include/linux/random.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) arch_get_random_seed_long_early(unsigned long *v) +{ + ({ int __ret_warn_on = !!(system_state != SYSTEM_BOOTING); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1122)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/random.h"), "i" (197), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1123)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1124)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + return arch_get_random_seed_long(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) arch_get_random_long_early(unsigned long *v) +{ + ({ int __ret_warn_on = !!(system_state != SYSTEM_BOOTING); if 
(__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1125)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/random.h"), "i" (205), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1126)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1127)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + return arch_get_random_long(v); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 next_pseudo_random32(u32 seed) +{ + return seed * 1664525 + 1013904223; +} +# 19 "./include/linux/net.h" 2 + + + + + + +# 1 "./include/uapi/linux/net.h" 1 +# 23 "./include/uapi/linux/net.h" +# 1 "./arch/x86/include/generated/uapi/asm/socket.h" 1 +# 24 "./include/uapi/linux/net.h" 2 +# 48 "./include/uapi/linux/net.h" +typedef enum { + SS_FREE = 0, + SS_UNCONNECTED, + SS_CONNECTING, + SS_CONNECTED, + SS_DISCONNECTING +} socket_state; +# 26 "./include/linux/net.h" 2 + +struct poll_table_struct; +struct pipe_inode_info; +struct inode; +struct file; +struct net; +# 59 "./include/linux/net.h" +enum sock_type { + SOCK_STREAM = 1, + SOCK_DGRAM = 2, + SOCK_RAW = 3, + SOCK_RDM = 4, + SOCK_SEQPACKET = 5, + SOCK_DCCP = 6, + SOCK_PACKET = 10, +}; +# 88 "./include/linux/net.h" +enum sock_shutdown_cmd { + SHUT_RD, + SHUT_WR, + SHUT_RDWR, +}; + +struct socket_wq { + + wait_queue_head_t wait; + struct fasync_struct *fasync_list; + unsigned long flags; + struct callback_head rcu; +} __attribute__((__aligned__((1 << (6))))); +# 112 "./include/linux/net.h" +struct socket { + socket_state state; + + short type; + + unsigned long flags; + + struct file *file; + struct sock *sk; + const struct proto_ops *ops; + + struct socket_wq wq; +}; + +struct vm_area_struct; +struct page; +struct sockaddr; +struct msghdr; +struct module; +struct sk_buff; +typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, + unsigned int, size_t); + +struct proto_ops { + int family; + struct module *owner; + int (*release) (struct socket *sock); + int (*bind) (struct socket *sock, + struct sockaddr *myaddr, + int sockaddr_len); + int (*connect) (struct socket *sock, + struct sockaddr *vaddr, + int sockaddr_len, int flags); + int (*socketpair)(struct socket *sock1, + struct socket *sock2); + int (*accept) (struct socket *sock, + struct socket *newsock, int flags, bool kern); + int (*getname) (struct socket *sock, + struct sockaddr *addr, + int peer); + __poll_t (*poll) (struct file *file, struct socket *sock, + struct poll_table_struct *wait); + int (*ioctl) (struct socket *sock, unsigned int cmd, + unsigned long arg); + + int (*compat_ioctl) (struct socket *sock, unsigned int cmd, + unsigned long arg); + + int (*gettstamp) (struct socket *sock, void *userstamp, + bool timeval, bool time32); + int (*listen) (struct socket *sock, int len); + int (*shutdown) (struct socket *sock, int flags); + int (*setsockopt)(struct socket *sock, int level, + int optname, char *optval, 
unsigned int optlen); + int (*getsockopt)(struct socket *sock, int level, + int optname, char *optval, int *optlen); + + int (*compat_setsockopt)(struct socket *sock, int level, + int optname, char *optval, unsigned int optlen); + int (*compat_getsockopt)(struct socket *sock, int level, + int optname, char *optval, int *optlen); + + void (*show_fdinfo)(struct seq_file *m, struct socket *sock); + int (*sendmsg) (struct socket *sock, struct msghdr *m, + size_t total_len); +# 185 "./include/linux/net.h" + int (*recvmsg) (struct socket *sock, struct msghdr *m, + size_t total_len, int flags); + int (*mmap) (struct file *file, struct socket *sock, + struct vm_area_struct * vma); + ssize_t (*sendpage) (struct socket *sock, struct page *page, + int offset, size_t size, int flags); + ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, unsigned int flags); + int (*set_peek_off)(struct sock *sk, int val); + int (*peek_len)(struct socket *sock); + + + + + int (*read_sock)(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor); + int (*sendpage_locked)(struct sock *sk, struct page *page, + int offset, size_t size, int flags); + int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, + size_t size); + int (*set_rcvlowat)(struct sock *sk, int val); +}; + + + + +struct net_proto_family { + int family; + int (*create)(struct net *net, struct socket *sock, + int protocol, int kern); + struct module *owner; +}; + +struct iovec; +struct kvec; + +enum { + SOCK_WAKE_IO, + SOCK_WAKE_WAITD, + SOCK_WAKE_SPACE, + SOCK_WAKE_URG, +}; + +int sock_wake_async(struct socket_wq *sk_wq, int how, int band); +int sock_register(const struct net_proto_family *fam); +void sock_unregister(int family); +bool sock_is_registered(int family); +int __sock_create(struct net *net, int family, int type, int proto, + struct socket **res, int kern); +int sock_create(int family, int type, int proto, struct socket **res); +int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res); +int sock_create_lite(int family, int type, int proto, struct socket **res); +struct socket *sock_alloc(void); +void sock_release(struct socket *sock); +int sock_sendmsg(struct socket *sock, struct msghdr *msg); +int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags); +struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname); +struct socket *sockfd_lookup(int fd, int *err); +struct socket *sock_from_file(struct file *file, int *err); + +int net_ratelimit(void); +# 293 "./include/linux/net.h" +int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, + size_t num, size_t len); +int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, + struct kvec *vec, size_t num, size_t len); +int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, + size_t num, size_t len, int flags); + +int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen); +int kernel_listen(struct socket *sock, int backlog); +int kernel_accept(struct socket *sock, struct socket **newsock, int flags); +int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, + int flags); +int kernel_getsockname(struct socket *sock, struct sockaddr *addr); +int kernel_getpeername(struct socket *sock, struct sockaddr *addr); +int kernel_sendpage(struct socket *sock, struct page *page, int offset, + size_t size, int flags); +int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, + 
size_t size, int flags); +int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); + + +u32 kernel_sock_ip_overhead(struct sock *sk); +# 64 "fs/io_uring.c" 2 +# 1 "./include/net/sock.h" 1 +# 41 "./include/net/sock.h" +# 1 "./include/linux/list_nulls.h" 1 +# 21 "./include/linux/list_nulls.h" +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next, **pprev; +}; +# 43 "./include/linux/list_nulls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_a_nulls(const struct hlist_nulls_node *ptr) +{ + return ((unsigned long)ptr & 1); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long get_nulls_value(const struct hlist_nulls_node *ptr) +{ + return ((unsigned long)ptr) >> 1; +} +# 67 "./include/linux/list_nulls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hlist_nulls_unhashed(const struct hlist_nulls_node *h) +{ + return !h->pprev; +} +# 81 "./include/linux/list_nulls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h) +{ + return !({ do { extern void __compiletime_assert_1128(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(h->pprev) == sizeof(char) || sizeof(h->pprev) == sizeof(short) || sizeof(h->pprev) == sizeof(int) || sizeof(h->pprev) == sizeof(long)) || sizeof(h->pprev) == sizeof(long long))) __compiletime_assert_1128(); } while (0); ({ typeof( _Generic((h->pprev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->pprev))) __x = (*(const volatile typeof( _Generic((h->pprev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->pprev))) *)&(h->pprev)); do { } while (0); (typeof(h->pprev))__x; }); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hlist_nulls_empty(const struct hlist_nulls_head *h) +{ + return is_a_nulls(({ do { extern void __compiletime_assert_1129(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(h->first) == sizeof(char) || sizeof(h->first) == sizeof(short) || sizeof(h->first) == sizeof(int) || sizeof(h->first) == sizeof(long)) || sizeof(h->first) == sizeof(long long))) __compiletime_assert_1129(); } while (0); ({ typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, 
signed long long: (signed long long)0, default: (h->first))) __x = (*(const volatile typeof( _Generic((h->first), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (h->first))) *)&(h->first)); do { } while (0); (typeof(h->first))__x; }); })); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_nulls_add_head(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *first = h->first; + + n->next = first; + do { do { extern void __compiletime_assert_1130(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_1130(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (&h->first); } while (0); } while (0); + h->first = n; + if (!is_a_nulls(first)) + do { do { extern void __compiletime_assert_1131(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(first->pprev) == sizeof(char) || sizeof(first->pprev) == sizeof(short) || sizeof(first->pprev) == sizeof(int) || sizeof(first->pprev) == sizeof(long)) || sizeof(first->pprev) == sizeof(long long))) __compiletime_assert_1131(); } while (0); do { *(volatile typeof(first->pprev) *)&(first->pprev) = (&n->next); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __hlist_nulls_del(struct hlist_nulls_node *n) +{ + struct hlist_nulls_node *next = n->next; + struct hlist_nulls_node **pprev = n->pprev; + + do { do { extern void __compiletime_assert_1132(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*pprev) == sizeof(char) || sizeof(*pprev) == sizeof(short) || sizeof(*pprev) == sizeof(int) || sizeof(*pprev) == sizeof(long)) || sizeof(*pprev) == sizeof(long long))) __compiletime_assert_1132(); } while (0); do { *(volatile typeof(*pprev) *)&(*pprev) = (next); } while (0); } while (0); + if (!is_a_nulls(next)) + do { do { extern void __compiletime_assert_1133(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(next->pprev) == sizeof(char) || sizeof(next->pprev) == sizeof(short) || sizeof(next->pprev) == sizeof(int) || sizeof(next->pprev) == sizeof(long)) || sizeof(next->pprev) == sizeof(long long))) __compiletime_assert_1133(); } while (0); do { *(volatile typeof(next->pprev) *)&(next->pprev) = (pprev); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_nulls_del(struct hlist_nulls_node *n) +{ + __hlist_nulls_del(n); + do { do { extern void __compiletime_assert_1134(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) 
__compiletime_assert_1134(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *) 0x122 + (0xdead000000000000UL))); } while (0); } while (0); +} +# 42 "./include/net/sock.h" 2 + + + + +# 1 "./include/linux/netdevice.h" 1 +# 26 "./include/linux/netdevice.h" +# 1 "./include/linux/delay.h" 1 +# 24 "./include/linux/delay.h" +extern unsigned long loops_per_jiffy; + +# 1 "./arch/x86/include/asm/delay.h" 1 + + + + +# 1 "./include/asm-generic/delay.h" 1 + + + + + +extern void __bad_udelay(void); +extern void __bad_ndelay(void); + +extern void __udelay(unsigned long usecs); +extern void __ndelay(unsigned long nsecs); +extern void __const_udelay(unsigned long xloops); +extern void __delay(unsigned long loops); +# 6 "./arch/x86/include/asm/delay.h" 2 + + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) use_tsc_delay(void); +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) use_tpause_delay(void); +void use_mwaitx_delay(void); +# 27 "./include/linux/delay.h" 2 +# 56 "./include/linux/delay.h" +extern unsigned long lpj_fine; +void calibrate_delay(void); +void __attribute__((weak)) calibration_delay_done(void); +void msleep(unsigned int msecs); +unsigned long msleep_interruptible(unsigned int msecs); +void usleep_range(unsigned long min, unsigned long max); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ssleep(unsigned int seconds) +{ + msleep(seconds * 1000); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsleep(unsigned long usecs) +{ + if (usecs <= 10) + ({ if (__builtin_constant_p(usecs)) { if ((usecs) / 20000 >= 1) __bad_udelay(); else __const_udelay((usecs) * 0x10c7ul); } else { __udelay(usecs); } }); + else if (usecs <= 20000) + usleep_range(usecs, 2 * usecs); + else + msleep((((usecs) + (1000) - 1) / (1000))); +} +# 27 "./include/linux/netdevice.h" 2 + +# 1 "./include/linux/prefetch.h" 1 +# 54 "./include/linux/prefetch.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void prefetch_range(void *addr, size_t len) +{ + + + + + + + +} +# 29 "./include/linux/netdevice.h" 2 + + + + + + +# 1 "./include/linux/dynamic_queue_limits.h" 1 +# 41 "./include/linux/dynamic_queue_limits.h" +struct dql { + + unsigned int num_queued; + unsigned int adj_limit; + unsigned int last_obj_cnt; + + + + unsigned int limit __attribute__((__aligned__((1 << (6))))); + unsigned int num_completed; + + unsigned int prev_ovlimit; + unsigned int prev_num_queued; + unsigned int prev_last_obj_cnt; + + unsigned int lowest_slack; + unsigned long slack_start_time; + + + unsigned int max_limit; + unsigned int min_limit; + unsigned int slack_hold_time; +}; +# 73 "./include/linux/dynamic_queue_limits.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dql_queued(struct dql *dql, unsigned int count) +{ + do { if (__builtin_expect(!!(count > ((~0U) / 16)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1135)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" 
"\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dynamic_queue_limits.h"), "i" (75), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1136)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + dql->last_obj_cnt = count; + + + + + + + __asm__ __volatile__("": : :"memory"); + + dql->num_queued += count; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dql_avail(const struct dql *dql) +{ + return ({ do { extern void __compiletime_assert_1137(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(dql->adj_limit) == sizeof(char) || sizeof(dql->adj_limit) == sizeof(short) || sizeof(dql->adj_limit) == sizeof(int) || sizeof(dql->adj_limit) == sizeof(long)) || sizeof(dql->adj_limit) == sizeof(long long))) __compiletime_assert_1137(); } while (0); ({ typeof( _Generic((dql->adj_limit), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dql->adj_limit))) __x = (*(const volatile typeof( _Generic((dql->adj_limit), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dql->adj_limit))) *)&(dql->adj_limit)); do { } while (0); (typeof(dql->adj_limit))__x; }); }) - ({ do { extern void __compiletime_assert_1138(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(dql->num_queued) == sizeof(char) || sizeof(dql->num_queued) == sizeof(short) || sizeof(dql->num_queued) == sizeof(int) || sizeof(dql->num_queued) == sizeof(long)) || sizeof(dql->num_queued) == sizeof(long long))) __compiletime_assert_1138(); } while (0); ({ typeof( _Generic((dql->num_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dql->num_queued))) __x = (*(const volatile typeof( _Generic((dql->num_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (dql->num_queued))) *)&(dql->num_queued)); do { } while (0); (typeof(dql->num_queued))__x; }); }); +} + + +void dql_completed(struct dql *dql, unsigned int count); + + +void dql_reset(struct dql *dql); + + +void dql_init(struct dql *dql, unsigned int hold_time); +# 36 "./include/linux/netdevice.h" 2 
+ +# 1 "./include/linux/ethtool.h" 1 +# 18 "./include/linux/ethtool.h" +# 1 "./include/uapi/linux/ethtool.h" 1 +# 19 "./include/uapi/linux/ethtool.h" +# 1 "./include/linux/if_ether.h" 1 +# 19 "./include/linux/if_ether.h" +# 1 "./include/linux/skbuff.h" 1 +# 24 "./include/linux/skbuff.h" +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 25 "./include/linux/skbuff.h" 2 + + +# 1 "./include/linux/textsearch.h" 1 +# 11 "./include/linux/textsearch.h" +struct module; + +struct ts_config; +# 23 "./include/linux/textsearch.h" +struct ts_state +{ + unsigned int offset; + char cb[40]; +}; +# 39 "./include/linux/textsearch.h" +struct ts_ops +{ + const char *name; + struct ts_config * (*init)(const void *, unsigned int, gfp_t, int); + unsigned int (*find)(struct ts_config *, + struct ts_state *); + void (*destroy)(struct ts_config *); + void * (*get_pattern)(struct ts_config *); + unsigned int (*get_pattern_len)(struct ts_config *); + struct module *owner; + struct list_head list; +}; +# 59 "./include/linux/textsearch.h" +struct ts_config +{ + struct ts_ops *ops; + int flags; +# 76 "./include/linux/textsearch.h" + unsigned int (*get_next_block)(unsigned int consumed, + const u8 **dst, + struct ts_config *conf, + struct ts_state *state); +# 89 "./include/linux/textsearch.h" + void (*finish)(struct ts_config *conf, + struct ts_state *state); +}; +# 105 "./include/linux/textsearch.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int textsearch_next(struct ts_config *conf, + struct ts_state *state) +{ + unsigned int ret = conf->ops->find(conf, state); + + if (conf->finish) + conf->finish(conf, state); + + return ret; +} +# 124 "./include/linux/textsearch.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int textsearch_find(struct ts_config *conf, + struct ts_state *state) +{ + state->offset = 0; + return textsearch_next(conf, state); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *textsearch_get_pattern(struct ts_config *conf) +{ + return conf->ops->get_pattern(conf); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int textsearch_get_pattern_len(struct ts_config *conf) +{ + return conf->ops->get_pattern_len(conf); +} + +extern int textsearch_register(struct ts_ops *); +extern int textsearch_unregister(struct ts_ops *); +extern struct ts_config *textsearch_prepare(const char *, const void *, + unsigned int, gfp_t, int); +extern void textsearch_destroy(struct ts_config *conf); +extern unsigned int textsearch_find_continuous(struct ts_config *, + struct ts_state *, + const void *, unsigned int); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct ts_config *alloc_ts_config(size_t payload, + gfp_t gfp_mask) +{ + struct ts_config *conf; + + conf = kzalloc((((sizeof(*conf)) + 8 -1) & ~(8 -1)) + payload, gfp_mask); + if (conf == ((void *)0)) + return ERR_PTR(-12); + + return conf; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *ts_config_priv(struct ts_config *conf) +{ + return ((u8 *) conf + (((sizeof(struct ts_config)) + 8 -1) & ~(8 -1))); +} +# 28 "./include/linux/skbuff.h" 2 +# 1 "./include/net/checksum.h" 1 +# 19 
"./include/net/checksum.h" +# 1 "./arch/x86/include/generated/uapi/asm/types.h" 1 +# 20 "./include/net/checksum.h" 2 + + +# 1 "./arch/x86/include/asm/checksum.h" 1 + + + + + + +# 1 "./arch/x86/include/asm/checksum_64.h" 1 +# 23 "./arch/x86/include/asm/checksum_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 csum_fold(__wsum sum) +{ + asm(" addl %1,%0\n" + " adcl $0xffff,%0" + : "=r" (sum) + : "r" (( u32)sum << 16), + "0" (( u32)sum & 0xffff0000)); + return ( __sum16)(~( u32)sum >> 16); +} +# 46 "./arch/x86/include/asm/checksum_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + unsigned int sum; + + asm(" movl (%1), %0\n" + " subl $4, %2\n" + " jbe 2f\n" + " addl 4(%1), %0\n" + " adcl 8(%1), %0\n" + " adcl 12(%1), %0\n" + "1: adcl 16(%1), %0\n" + " lea 4(%1), %1\n" + " decl %2\n" + " jne 1b\n" + " adcl $0, %0\n" + " movl %0, %2\n" + " shrl $16, %0\n" + " addw %w2, %w0\n" + " adcl $0, %0\n" + " notl %0\n" + "2:" + + + + : "=r" (sum), "=r" (iph), "=r" (ihl) + : "1" (iph), "2" (ihl) + : "memory"); + return ( __sum16)sum; +} +# 87 "./arch/x86/include/asm/checksum_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) +{ + asm(" addl %1, %0\n" + " adcl %2, %0\n" + " adcl %3, %0\n" + " adcl $0, %0\n" + : "=r" (sum) + : "g" (daddr), "g" (saddr), + "g" ((len + proto)<<8), "0" (sum)); + return sum; +} +# 113 "./arch/x86/include/asm/checksum_64.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, + __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} +# 130 "./arch/x86/include/asm/checksum_64.h" +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + + +extern __attribute__((__externally_visible__)) __wsum csum_partial_copy_generic(const void *src, const void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + + +extern __wsum csum_and_copy_from_user(const void *src, void *dst, + int len, __wsum isum, int *errp); +extern __wsum csum_and_copy_to_user(const void *src, void *dst, + int len, __wsum isum, int *errp); +extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum); +# 153 "./arch/x86/include/asm/checksum_64.h" +extern __sum16 ip_compute_csum(const void *buff, int len); +# 168 "./arch/x86/include/asm/checksum_64.h" +struct in6_addr; + + +extern __sum16 +csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned add32_with_carry(unsigned a, unsigned b) +{ + asm("addl %2,%0\n\t" + "adcl $0,%0" + : "=r" (a) + : "0" (a), "rm" (b)); + return a; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum csum_add(__wsum csum, __wsum addend) +{ + return ( __wsum)add32_with_carry(( unsigned)csum, + ( unsigned)addend); +} +# 8 "./arch/x86/include/asm/checksum.h" 2 +# 23 "./include/net/checksum.h" 2 +# 59 "./include/net/checksum.h" +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum csum_sub(__wsum csum, __wsum addend) +{ + return csum_add(csum, ~addend); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 csum16_add(__sum16 csum, __be16 addend) +{ + u16 res = ( u16)csum; + + res += ( u16)addend; + return ( __sum16)(res + (res < ( u16)addend)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 csum16_sub(__sum16 csum, __be16 addend) +{ + return csum16_add(csum, ~addend); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum +csum_block_add(__wsum csum, __wsum csum2, int offset) +{ + u32 sum = ( u32)csum2; + + + if (offset & 1) + sum = ror32(sum, 8); + + return csum_add(csum, ( __wsum)sum); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum +csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) +{ + return csum_block_add(csum, csum2, offset); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum +csum_block_sub(__wsum csum, __wsum csum2, int offset) +{ + return csum_block_add(csum, ~csum2, offset); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum csum_unfold(__sum16 n) +{ + return ( __wsum)n; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum csum_partial_ext(const void *buff, int len, __wsum sum) +{ + return csum_partial(buff, len, sum); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void csum_replace4(__sum16 *sum, __be32 from, __be32 to) +{ + __wsum tmp = csum_sub(~csum_unfold(*sum), ( __wsum)from); + + *sum = csum_fold(csum_add(tmp, ( __wsum)to)); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void csum_replace2(__sum16 *sum, __be16 old, __be16 new) +{ + *sum = ~csum16_add(csum16_sub(~(*sum), old), new); +} + +struct sk_buff; +void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, + __be32 from, __be32 to, bool pseudohdr); +void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, + const __be32 *from, const __be32 *to, + bool pseudohdr); +void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, + __wsum diff, bool pseudohdr); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, + __be16 from, __be16 to, + bool pseudohdr) +{ + inet_proto_csum_replace4(sum, skb, ( __be32)from, + ( __be32)to, pseudohdr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum remcsum_adjust(void *ptr, __wsum csum, + int start, int offset) +{ + __sum16 *psum = (__sum16 *)(ptr + offset); + __wsum delta; + + + csum = csum_sub(csum, 
csum_partial(ptr, start, 0)); + + + delta = csum_sub(( __wsum)csum_fold(csum), + ( __wsum)*psum); + *psum = csum_fold(csum); + + return delta; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void remcsum_unadjust(__sum16 *psum, __wsum delta) +{ + *psum = csum_fold(csum_sub(delta, ( __wsum)*psum)); +} +# 29 "./include/linux/skbuff.h" 2 + + +# 1 "./include/linux/dma-mapping.h" 1 +# 9 "./include/linux/dma-mapping.h" +# 1 "./include/linux/dma-debug.h" 1 +# 13 "./include/linux/dma-debug.h" +struct device; +struct scatterlist; +struct bus_type; + + + +extern void dma_debug_add_bus(struct bus_type *bus); + +extern void debug_dma_map_single(struct device *dev, const void *addr, + unsigned long len); + +extern void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr); + +extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); + +extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction); + +extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction); + +extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int dir); + +extern void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt); + +extern void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr); + +extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr, + size_t size, int direction, + dma_addr_t dma_addr); + +extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction); + +extern void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + int direction); + +extern void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction); + +extern void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_dump_mappings(struct device *dev); + +extern void debug_dma_assert_idle(struct page *page); +# 10 "./include/linux/dma-mapping.h" 2 +# 1 "./include/linux/dma-direction.h" 1 + + + + +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; +# 11 "./include/linux/dma-mapping.h" 2 +# 76 "./include/linux/dma-mapping.h" +struct dma_map_ops { + void* (*alloc)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs); + void (*free)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + unsigned long attrs); + int (*mmap)(struct device *, struct vm_area_struct *, + void *, dma_addr_t, size_t, + unsigned long attrs); + + int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *, + dma_addr_t, size_t, unsigned long attrs); + + dma_addr_t (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + + + + + int (*map_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long 
attrs); + void (*unmap_sg)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + unsigned long attrs); + dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + void (*sync_single_for_cpu)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_single_for_device)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_sg_for_cpu)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + void (*sync_sg_for_device)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + void (*cache_sync)(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); + int (*dma_supported)(struct device *dev, u64 mask); + u64 (*get_required_mask)(struct device *dev); + size_t (*max_mapping_size)(struct device *dev); + unsigned long (*get_merge_boundary)(struct device *dev); +}; + + + +extern const struct dma_map_ops dma_virt_ops; +extern const struct dma_map_ops dma_dummy_ops; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int valid_dma_direction(int dma_direction) +{ + return ((dma_direction == DMA_BIDIRECTIONAL) || + (dma_direction == DMA_TO_DEVICE) || + (dma_direction == DMA_FROM_DEVICE)); +} + + + + + + +int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret); +int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); + +int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, size_t size, int *ret); + +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle); +int dma_release_from_global_coherent(int order, void *vaddr); +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, + size_t size, int *ret); +# 191 "./include/linux/dma-mapping.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dma_is_direct(const struct dma_map_ops *ops) +{ + return __builtin_expect(!!(!ops), 1); +} + + + + + +dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs); +dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir, unsigned long attrs); + + + +void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir); +void dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +# 228 "./include/linux/dma-mapping.h" +void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs); +void dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum 
dma_data_direction dir); +void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +# 256 "./include/linux/dma-mapping.h" +size_t dma_direct_max_mapping_size(struct device *dev); + + +# 1 "./arch/x86/include/asm/dma-mapping.h" 1 +# 13 "./arch/x86/include/asm/dma-mapping.h" +# 1 "./arch/x86/include/asm/swiotlb.h" 1 + + + + +# 1 "./include/linux/swiotlb.h" 1 +# 9 "./include/linux/swiotlb.h" +struct device; +struct page; +struct scatterlist; + +enum swiotlb_force { + SWIOTLB_NORMAL, + SWIOTLB_FORCE, + SWIOTLB_NO_FORCE, +}; +# 32 "./include/linux/swiotlb.h" +extern void swiotlb_init(int verbose); +int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); +extern unsigned long swiotlb_nr_tbl(void); +unsigned long swiotlb_size_or_default(void); +extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) swiotlb_update_mem_attributes(void); + + + + +enum dma_sync_target { + SYNC_FOR_CPU = 0, + SYNC_FOR_DEVICE = 1, +}; + +extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, + dma_addr_t tbl_dma_addr, + phys_addr_t phys, + size_t mapping_size, + size_t alloc_size, + enum dma_data_direction dir, + unsigned long attrs); + +extern void swiotlb_tbl_unmap_single(struct device *hwdev, + phys_addr_t tlb_addr, + size_t mapping_size, + size_t alloc_size, + enum dma_data_direction dir, + unsigned long attrs); + +extern void swiotlb_tbl_sync_single(struct device *hwdev, + phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir, + enum dma_sync_target target); + +dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs); + + +extern enum swiotlb_force swiotlb_force; +extern phys_addr_t io_tlb_start, io_tlb_end; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_swiotlb_buffer(phys_addr_t paddr) +{ + return paddr >= io_tlb_start && paddr < io_tlb_end; +} + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) swiotlb_exit(void); +unsigned int swiotlb_max_segment(void); +size_t swiotlb_max_mapping_size(struct device *dev); +bool is_swiotlb_active(void); +# 107 "./include/linux/swiotlb.h" +extern void swiotlb_print_info(void); +extern void swiotlb_set_max_segment(unsigned int); +# 6 "./arch/x86/include/asm/swiotlb.h" 2 + + +extern int swiotlb; +extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pci_swiotlb_detect_override(void); +extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pci_swiotlb_detect_4gb(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pci_swiotlb_init(void); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) pci_swiotlb_late_init(void); +# 14 "./arch/x86/include/asm/dma-mapping.h" 2 +# 1 "./include/linux/dma-contiguous.h" 1 +# 55 "./include/linux/dma-contiguous.h" +struct cma; +struct page; + + + +extern struct cma *dma_contiguous_default_area; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) struct cma *dev_get_cma_area(struct device *dev) +{ + if (dev && dev->cma_area) + return dev->cma_area; + return dma_contiguous_default_area; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_set_cma_area(struct device *dev, struct cma *cma) +{ + if (dev) + dev->cma_area = cma; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_contiguous_set_default(struct cma *cma) +{ + dma_contiguous_default_area = cma; +} + +void dma_contiguous_reserve(phys_addr_t addr_limit); + +int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, + phys_addr_t limit, struct cma **res_cma, + bool fixed); +# 99 "./include/linux/dma-contiguous.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_declare_contiguous(struct device *dev, phys_addr_t size, + phys_addr_t base, phys_addr_t limit) +{ + struct cma *cma; + int ret; + ret = dma_contiguous_reserve_area(size, base, limit, &cma, true); + if (ret == 0) + dev_set_cma_area(dev, cma); + + return ret; +} + +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, + unsigned int order, bool no_warn); +bool dma_release_from_contiguous(struct device *dev, struct page *pages, + int count); +struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp); +void dma_free_contiguous(struct device *dev, struct page *page, size_t size); +# 15 "./arch/x86/include/asm/dma-mapping.h" 2 + +extern int iommu_merge; +extern int panic_on_overflow; + +extern const struct dma_map_ops *dma_ops; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + return dma_ops; +} +# 260 "./include/linux/dma-mapping.h" 2 + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev->dma_ops) + return dev->dma_ops; + return get_arch_dma_ops(dev->bus); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ + dev->dma_ops = dma_ops; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dma_addr_t dma_map_page_attrs(struct device *dev, + struct page *page, size_t offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1139)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (281), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm 
volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1140)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (dma_is_direct(ops)) + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); + else + addr = ops->map_page(dev, page, offset, size, dir, attrs); + debug_dma_map_page(dev, page, offset, size, dir, addr); + + return addr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1141)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (296), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1142)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (dma_is_direct(ops)) + dma_direct_unmap_page(dev, addr, size, dir, attrs); + else if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + int ents; + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1143)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (315), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1144)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (dma_is_direct(ops)) + ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); + else + ents = ops->map_sg(dev, sg, nents, dir, attrs); + do { if (__builtin_expect(!!(ents < 0), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1145)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" 
("include/linux/dma-mapping.h"), "i" (320), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1146)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + debug_dma_map_sg(dev, sg, nents, ents, dir); + + return ents; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1147)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (332), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1148)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + debug_dma_unmap_sg(dev, sg, nents, dir); + if (dma_is_direct(ops)) + dma_direct_unmap_sg(dev, sg, nents, dir, attrs); + else if (ops->unmap_sg) + ops->unmap_sg(dev, sg, nents, dir, attrs); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dma_addr_t dma_map_resource(struct device *dev, + phys_addr_t phys_addr, + size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr = (~(dma_addr_t)0); + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1149)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (349), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1150)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + + if (({ int __ret_warn_on = !!(pfn_valid(((unsigned long)((phys_addr) >> 12)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1151)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (352), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct 
bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1152)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1153)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return (~(dma_addr_t)0); + + if (dma_is_direct(ops)) + addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); + else if (ops->map_resource) + addr = ops->map_resource(dev, phys_addr, size, dir, attrs); + + debug_dma_map_resource(dev, phys_addr, size, dir, addr); + return addr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1154)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (370), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1155)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (!dma_is_direct(ops) && ops->unmap_resource) + ops->unmap_resource(dev, addr, size, dir, attrs); + debug_dma_unmap_resource(dev, addr, size, dir); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1156)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (382), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1157)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (dma_is_direct(ops)) + dma_direct_sync_single_for_cpu(dev, addr, size, dir); + else if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr, size, dir); + debug_dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + do { if 
(__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1158)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (396), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1159)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (dma_is_direct(ops)) + dma_direct_sync_single_for_device(dev, addr, size, dir); + else if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr, size, dir); + debug_dma_sync_single_for_device(dev, addr, size, dir); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1160)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (410), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1161)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (dma_is_direct(ops)) + dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); + else if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + do { if (__builtin_expect(!!(!valid_dma_direction(dir)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1162)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (424), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1163)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (dma_is_direct(ops)) + dma_direct_sync_sg_for_device(dev, sg, nelems, dir); 
+ else if (ops->sync_sg_for_device) + ops->sync_sg_for_device(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + debug_dma_mapping_error(dev, dma_addr); + + if (dma_addr == (~(dma_addr_t)0)) + return -12; + return 0; +} + +void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag, unsigned long attrs); +void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle, unsigned long attrs); +void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs); +void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle); +void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir); +int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); +int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); +bool dma_can_mmap(struct device *dev); +int dma_supported(struct device *dev, u64 mask); +int dma_set_mask(struct device *dev, u64 mask); +int dma_set_coherent_mask(struct device *dev, u64 mask); +u64 dma_get_required_mask(struct device *dev); +size_t dma_max_mapping_size(struct device *dev); +unsigned long dma_get_merge_boundary(struct device *dev); +# 580 "./include/linux/dma-mapping.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + + if (({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(is_vmalloc_addr(ptr)); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1164)); }); __warn_printk("%s %s: " "rejecting DMA map of vmalloc memory\n", dev_driver_string(dev), dev_name(dev)); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1165)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/dma-mapping.h"), "i" (584), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1166)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1167)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1168)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }) + ) + return (~(dma_addr_t)0); + 
debug_dma_map_single(dev, ptr, size); + return dma_map_page_attrs(dev, (((struct page *)vmemmap_base) + (__phys_addr((unsigned long)(ptr)) >> 12)), ((unsigned long)(ptr) & ~(~(((1UL) << 12)-1))), + size, dir, attrs); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return dma_unmap_page_attrs(dev, addr, size, dir, attrs); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t addr, unsigned long offset, size_t size, + enum dma_data_direction dir) +{ + return dma_sync_single_for_cpu(dev, addr + offset, size, dir); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t addr, unsigned long offset, size_t size, + enum dma_data_direction dir) +{ + return dma_sync_single_for_device(dev, addr + offset, size, dir); +} +# 628 "./include/linux/dma-mapping.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + int nents; + + nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); + if (nents <= 0) + return -22; + sgt->nents = nents; + return 0; +} +# 651 "./include/linux/dma-mapping.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); +} +# 669 "./include/linux/dma-mapping.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_sync_sgtable_for_cpu(struct device *dev, + struct sg_table *sgt, enum dma_data_direction dir) +{ + dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir); +} +# 686 "./include/linux/dma-mapping.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_sync_sgtable_for_device(struct device *dev, + struct sg_table *sgt, enum dma_data_direction dir) +{ + dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir); +} +# 701 "./include/linux/dma-mapping.h" +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); + +struct page **dma_common_find_pages(void *cpu_addr); +void *dma_common_contiguous_remap(struct page *page, size_t size, + pgprot_t prot, const void *caller); + +void *dma_common_pages_remap(struct page **pages, size_t size, + pgprot_t prot, const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size); + +void *dma_alloc_from_pool(struct device *dev, size_t size, + struct page **ret_page, gfp_t flags); +bool dma_free_from_pool(struct device *dev, void *start, size_t size); + +int +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, + dma_addr_t dma_addr, size_t size, unsigned long attrs); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + + return dma_alloc_attrs(dev, size, dma_handle, gfp, + (gfp & (( gfp_t)0x2000u)) ? (1UL << 8) : 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 dma_get_mask(struct device *dev) +{ + if (dev->dma_mask && *dev->dma_mask) + return *dev->dma_mask; + return (((32) == 64) ? ~0ULL : ((1ULL<<(32))-1)); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) +{ + dev->dma_mask = &dev->coherent_dma_mask; + return dma_set_mask_and_coherent(dev, mask); +} +# 775 "./include/linux/dma-mapping.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dma_addressing_limited(struct device *dev) +{ + return ({ typeof(dma_get_mask(dev)) __x = (dma_get_mask(dev)); typeof(dev->bus_dma_limit) __y = (dev->bus_dma_limit); __x == 0 ? __y : ((__y == 0) ? __x : __builtin_choose_expr(((!!(sizeof((typeof(__x) *)1 == (typeof(__y) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(__x) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(__y) * 0l)) : (int *)8))))), ((__x) < (__y) ? (__x) : (__y)), ({ typeof(__x) __UNIQUE_ID___x1169 = (__x); typeof(__y) __UNIQUE_ID___y1170 = (__y); ((__UNIQUE_ID___x1169) < (__UNIQUE_ID___y1170) ? (__UNIQUE_ID___x1169) : (__UNIQUE_ID___y1170)); }))); }) < + dma_get_required_mask(dev); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_setup_dma_ops(struct device *dev, u64 dma_base, + u64 size, const struct iommu_ops *iommu, bool coherent) +{ +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_teardown_dma_ops(struct device *dev) +{ +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int dma_get_max_seg_size(struct device *dev) +{ + if (dev->dma_parms && dev->dma_parms->max_segment_size) + return dev->dma_parms->max_segment_size; + return 0x00010000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_set_max_seg_size(struct device *dev, unsigned int size) +{ + if (dev->dma_parms) { + dev->dma_parms->max_segment_size = size; + return 0; + } + return -5; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long dma_get_seg_boundary(struct device *dev) +{ + if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) + return dev->dma_parms->segment_boundary_mask; + return (((32) == 64) ? 
~0ULL : ((1ULL<<(32))-1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_set_seg_boundary(struct device *dev, unsigned long mask) +{ + if (dev->dma_parms) { + dev->dma_parms->segment_boundary_mask = mask; + return 0; + } + return -5; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_get_cache_alignment(void) +{ + + + + return 1; +} + + +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size); +# 851 "./include/linux/dma-mapping.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + return dmam_alloc_attrs(dev, size, dma_handle, gfp, + (gfp & (( gfp_t)0x2000u)) ? (1UL << 8) : 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *dma_alloc_wc(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t gfp) +{ + unsigned long attrs = (1UL << 2); + + if (gfp & (( gfp_t)0x2000u)) + attrs |= (1UL << 8); + + return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dma_free_wc(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr) +{ + return dma_free_attrs(dev, size, cpu_addr, dma_addr, + (1UL << 2)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dma_mmap_wc(struct device *dev, + struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, + (1UL << 2)); +} +# 32 "./include/linux/skbuff.h" 2 +# 1 "./include/linux/netdev_features.h" 1 +# 12 "./include/linux/netdev_features.h" +typedef u64 netdev_features_t; + +enum { + NETIF_F_SG_BIT, + NETIF_F_IP_CSUM_BIT, + __UNUSED_NETIF_F_1, + NETIF_F_HW_CSUM_BIT, + NETIF_F_IPV6_CSUM_BIT, + NETIF_F_HIGHDMA_BIT, + NETIF_F_FRAGLIST_BIT, + NETIF_F_HW_VLAN_CTAG_TX_BIT, + NETIF_F_HW_VLAN_CTAG_RX_BIT, + NETIF_F_HW_VLAN_CTAG_FILTER_BIT, + NETIF_F_VLAN_CHALLENGED_BIT, + NETIF_F_GSO_BIT, + NETIF_F_LLTX_BIT, + + NETIF_F_NETNS_LOCAL_BIT, + NETIF_F_GRO_BIT, + NETIF_F_LRO_BIT, + + NETIF_F_GSO_SHIFT, + NETIF_F_TSO_BIT + = NETIF_F_GSO_SHIFT, + NETIF_F_GSO_ROBUST_BIT, + NETIF_F_TSO_ECN_BIT, + NETIF_F_TSO_MANGLEID_BIT, + NETIF_F_TSO6_BIT, + NETIF_F_FSO_BIT, + NETIF_F_GSO_GRE_BIT, + NETIF_F_GSO_GRE_CSUM_BIT, + NETIF_F_GSO_IPXIP4_BIT, + NETIF_F_GSO_IPXIP6_BIT, + NETIF_F_GSO_UDP_TUNNEL_BIT, + NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT, + NETIF_F_GSO_PARTIAL_BIT, + + + + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, + NETIF_F_GSO_SCTP_BIT, + NETIF_F_GSO_ESP_BIT, + NETIF_F_GSO_UDP_BIT, + NETIF_F_GSO_UDP_L4_BIT, + NETIF_F_GSO_FRAGLIST_BIT, + NETIF_F_GSO_LAST = + NETIF_F_GSO_FRAGLIST_BIT, + + NETIF_F_FCOE_CRC_BIT, + NETIF_F_SCTP_CRC_BIT, + NETIF_F_FCOE_MTU_BIT, + NETIF_F_NTUPLE_BIT, + NETIF_F_RXHASH_BIT, + NETIF_F_RXCSUM_BIT, + NETIF_F_NOCACHE_COPY_BIT, + NETIF_F_LOOPBACK_BIT, + NETIF_F_RXFCS_BIT, + NETIF_F_RXALL_BIT, + NETIF_F_HW_VLAN_STAG_TX_BIT, + NETIF_F_HW_VLAN_STAG_RX_BIT, + NETIF_F_HW_VLAN_STAG_FILTER_BIT, + NETIF_F_HW_L2FW_DOFFLOAD_BIT, + + NETIF_F_HW_TC_BIT, + NETIF_F_HW_ESP_BIT, + NETIF_F_HW_ESP_TX_CSUM_BIT, + NETIF_F_RX_UDP_TUNNEL_PORT_BIT, + 
NETIF_F_HW_TLS_TX_BIT, + NETIF_F_HW_TLS_RX_BIT, + + NETIF_F_GRO_HW_BIT, + NETIF_F_HW_TLS_RECORD_BIT, + NETIF_F_GRO_FRAGLIST_BIT, + + NETIF_F_HW_MACSEC_BIT, +# 95 "./include/linux/netdev_features.h" + NETDEV_FEATURE_COUNT +}; +# 163 "./include/linux/netdev_features.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int find_next_netdev_feature(u64 feature, unsigned long start) +{ + + + + feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); + + return fls64(feature) - 1; +} +# 33 "./include/linux/skbuff.h" 2 + + +# 1 "./include/net/flow_dissector.h" 1 + + + + + +# 1 "./include/linux/in6.h" 1 +# 19 "./include/linux/in6.h" +# 1 "./include/uapi/linux/in6.h" 1 +# 33 "./include/uapi/linux/in6.h" +struct in6_addr { + union { + __u8 u6_addr8[16]; + + __be16 u6_addr16[8]; + __be32 u6_addr32[4]; + + } in6_u; + + + + + +}; + + + +struct sockaddr_in6 { + unsigned short int sin6_family; + __be16 sin6_port; + __be32 sin6_flowinfo; + struct in6_addr sin6_addr; + __u32 sin6_scope_id; +}; + + + +struct ipv6_mreq { + + struct in6_addr ipv6mr_multiaddr; + + + int ipv6mr_ifindex; +}; + + + + +struct in6_flowlabel_req { + struct in6_addr flr_dst; + __be32 flr_label; + __u8 flr_action; + __u8 flr_share; + __u16 flr_flags; + __u16 flr_expires; + __u16 flr_linger; + __u32 __flr_pad; + +}; +# 20 "./include/linux/in6.h" 2 + + + + + +extern const struct in6_addr in6addr_any; + +extern const struct in6_addr in6addr_loopback; + +extern const struct in6_addr in6addr_linklocal_allnodes; + + +extern const struct in6_addr in6addr_linklocal_allrouters; + + +extern const struct in6_addr in6addr_interfacelocal_allnodes; + + +extern const struct in6_addr in6addr_interfacelocal_allrouters; + + +extern const struct in6_addr in6addr_sitelocal_allrouters; +# 7 "./include/net/flow_dissector.h" 2 +# 1 "./include/linux/siphash.h" 1 +# 20 "./include/linux/siphash.h" +typedef struct { + u64 key[2]; +} siphash_key_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool siphash_key_is_zero(const siphash_key_t *key) +{ + return !(key->key[0] | key->key[1]); +} + +u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); + + + + +u64 siphash_1u64(const u64 a, const siphash_key_t *key); +u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); +u64 siphash_3u64(const u64 a, const u64 b, const u64 c, + const siphash_key_t *key); +u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d, + const siphash_key_t *key); +u64 siphash_1u32(const u32 a, const siphash_key_t *key); +u64 siphash_3u32(const u32 a, const u32 b, const u32 c, + const siphash_key_t *key); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 siphash_2u32(const u32 a, const u32 b, + const siphash_key_t *key) +{ + return siphash_1u64((u64)b << 32 | a, key); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 siphash_4u32(const u32 a, const u32 b, const u32 c, + const u32 d, const siphash_key_t *key) +{ + return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ___siphash_aligned(const __le64 *data, size_t len, + const siphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return siphash_1u32(__le32_to_cpup((const 
__le32 *)data), key); + if (__builtin_constant_p(len) && len == 8) + return siphash_1u64((( __u64)(__le64)(data[0])), key); + if (__builtin_constant_p(len) && len == 16) + return siphash_2u64((( __u64)(__le64)(data[0])), (( __u64)(__le64)(data[1])), + key); + if (__builtin_constant_p(len) && len == 24) + return siphash_3u64((( __u64)(__le64)(data[0])), (( __u64)(__le64)(data[1])), + (( __u64)(__le64)(data[2])), key); + if (__builtin_constant_p(len) && len == 32) + return siphash_4u64((( __u64)(__le64)(data[0])), (( __u64)(__le64)(data[1])), + (( __u64)(__le64)(data[2])), (( __u64)(__le64)(data[3])), + key); + return __siphash_aligned(data, len, key); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 siphash(const void *data, size_t len, + const siphash_key_t *key) +{ + + + + + return ___siphash_aligned(data, len, key); +} + + +typedef struct { + unsigned long key[2]; +} hsiphash_key_t; + +u32 __hsiphash_aligned(const void *data, size_t len, + const hsiphash_key_t *key); + + + + + +u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); +u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); +u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c, + const hsiphash_key_t *key); +u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d, + const hsiphash_key_t *key); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 ___hsiphash_aligned(const __le32 *data, size_t len, + const hsiphash_key_t *key) +{ + if (__builtin_constant_p(len) && len == 4) + return hsiphash_1u32((( __u32)(__le32)(data[0])), key); + if (__builtin_constant_p(len) && len == 8) + return hsiphash_2u32((( __u32)(__le32)(data[0])), (( __u32)(__le32)(data[1])), + key); + if (__builtin_constant_p(len) && len == 12) + return hsiphash_3u32((( __u32)(__le32)(data[0])), (( __u32)(__le32)(data[1])), + (( __u32)(__le32)(data[2])), key); + if (__builtin_constant_p(len) && len == 16) + return hsiphash_4u32((( __u32)(__le32)(data[0])), (( __u32)(__le32)(data[1])), + (( __u32)(__le32)(data[2])), (( __u32)(__le32)(data[3])), + key); + return __hsiphash_aligned(data, len, key); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 hsiphash(const void *data, size_t len, + const hsiphash_key_t *key) +{ + + + + + return ___hsiphash_aligned(data, len, key); +} +# 8 "./include/net/flow_dissector.h" 2 + +# 1 "./include/uapi/linux/if_ether.h" 1 +# 164 "./include/uapi/linux/if_ether.h" +struct ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_proto; +} __attribute__((packed)); +# 10 "./include/net/flow_dissector.h" 2 + +struct bpf_prog; +struct net; +struct sk_buff; + + + + + +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; + u32 flags; +}; + + + + + +enum flow_dissect_ret { + FLOW_DISSECT_RET_OUT_GOOD, + FLOW_DISSECT_RET_OUT_BAD, + FLOW_DISSECT_RET_PROTO_AGAIN, + FLOW_DISSECT_RET_IPPROTO_AGAIN, + FLOW_DISSECT_RET_CONTINUE, +}; + + + + + + +struct flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_tags { + u32 flow_label; +}; + +struct flow_dissector_key_vlan { + union { + struct { + u16 vlan_id:12, + vlan_dei:1, + vlan_priority:3; + }; + __be16 vlan_tci; + }; + __be16 vlan_tpid; +}; + +struct flow_dissector_mpls_lse { + u32 mpls_ttl:8, + mpls_bos:1, + mpls_tc:3, + mpls_label:20; +}; + + 
+struct flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[7]; + u8 used_lses; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dissector_set_mpls_lse(struct flow_dissector_key_mpls *mpls, + int lse_index) +{ + mpls->used_lses |= 1 << lse_index; +} +# 90 "./include/net/flow_dissector.h" +struct flow_dissector_key_enc_opts { + u8 data[255]; + + + u8 len; + __be16 dst_opt_type; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + + + + + + +struct flow_dissector_key_ipv4_addrs { + + __be32 src; + __be32 dst; +}; + + + + + + +struct flow_dissector_key_ipv6_addrs { + + struct in6_addr src; + struct in6_addr dst; +}; + + + + + +struct flow_dissector_key_tipc { + __be32 key; +}; + + + + + + +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc tipckey; + }; +}; +# 155 "./include/net/flow_dissector.h" +struct flow_dissector_key_arp { + __u32 sip; + __u32 tip; + __u8 op; + unsigned char sha[6]; + unsigned char tha[6]; +}; + + + + + + + +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + + + + + + + +struct flow_dissector_key_icmp { + struct { + u8 type; + u8 code; + }; + u16 id; +}; + + + + + + +struct flow_dissector_key_eth_addrs { + + unsigned char dst[6]; + unsigned char src[6]; +}; + + + + + +struct flow_dissector_key_tcp { + __be16 flags; +}; + + + + + + +struct flow_dissector_key_ip { + __u8 tos; + __u8 ttl; +}; + + + + + + +struct flow_dissector_key_meta { + int ingress_ifindex; + u16 ingress_iftype; +}; +# 239 "./include/net/flow_dissector.h" +struct flow_dissector_key_ct { + u16 ct_state; + u16 ct_zone; + u32 ct_mark; + u32 ct_labels[4]; +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL, + FLOW_DISSECTOR_KEY_BASIC, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + FLOW_DISSECTOR_KEY_PORTS, + FLOW_DISSECTOR_KEY_PORTS_RANGE, + FLOW_DISSECTOR_KEY_ICMP, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + FLOW_DISSECTOR_KEY_TIPC, + FLOW_DISSECTOR_KEY_ARP, + FLOW_DISSECTOR_KEY_VLAN, + FLOW_DISSECTOR_KEY_FLOW_LABEL, + FLOW_DISSECTOR_KEY_GRE_KEYID, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY, + FLOW_DISSECTOR_KEY_ENC_KEYID, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + FLOW_DISSECTOR_KEY_ENC_CONTROL, + FLOW_DISSECTOR_KEY_ENC_PORTS, + FLOW_DISSECTOR_KEY_MPLS, + FLOW_DISSECTOR_KEY_TCP, + FLOW_DISSECTOR_KEY_IP, + FLOW_DISSECTOR_KEY_CVLAN, + FLOW_DISSECTOR_KEY_ENC_IP, + FLOW_DISSECTOR_KEY_ENC_OPTS, + FLOW_DISSECTOR_KEY_META, + FLOW_DISSECTOR_KEY_CT, + + FLOW_DISSECTOR_KEY_MAX, +}; + + + + + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; + +}; + +struct flow_dissector { + unsigned int used_keys; + unsigned short int offset[FLOW_DISSECTOR_KEY_MAX]; +}; + +struct flow_keys_basic { + struct flow_dissector_key_control control; + struct flow_dissector_key_basic basic; +}; + +struct flow_keys { + struct flow_dissector_key_control control; + + struct flow_dissector_key_basic basic __attribute__((__aligned__(__alignof__(u64)))); + struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_icmp icmp; + + struct flow_dissector_key_addrs addrs; +}; + + + + +__be32 flow_get_u32_src(const struct 
flow_keys *flow); +__be32 flow_get_u32_dst(const struct flow_keys *flow); + +extern struct flow_dissector flow_keys_dissector; +extern struct flow_dissector flow_keys_basic_dissector; +# 329 "./include/net/flow_dissector.h" +struct flow_keys_digest { + u8 data[16]; +}; + +void make_flow_keys_digest(struct flow_keys_digest *digest, + const struct flow_keys *flow); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool flow_keys_have_l4(const struct flow_keys *keys) +{ + return (keys->ports.ports || keys->tags.flow_label); +} + +u32 flow_hash_from_keys(struct flow_keys *keys); +void skb_flow_get_icmp_tci(const struct sk_buff *skb, + struct flow_dissector_key_icmp *key_icmp, + void *data, int thoff, int hlen); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dissector_uses_key(const struct flow_dissector *flow_dissector, + enum flow_dissector_key_id key_id) +{ + return flow_dissector->used_keys & (1 << key_id); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_flow_dissector_target(struct flow_dissector *flow_dissector, + enum flow_dissector_key_id key_id, + void *target_container) +{ + return ((char *)target_container) + flow_dissector->offset[key_id]; +} + +struct bpf_flow_dissector { + struct bpf_flow_keys *flow_keys; + const struct sk_buff *skb; + void *data; + void *data_end; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +flow_dissector_init_keys(struct flow_dissector_key_control *key_control, + struct flow_dissector_key_basic *key_basic) +{ + memset(key_control, 0, sizeof(*key_control)); + memset(key_basic, 0, sizeof(*key_basic)); +} + + +int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog); +# 36 "./include/linux/skbuff.h" 2 +# 1 "./include/linux/splice.h" 1 +# 12 "./include/linux/splice.h" +# 1 "./include/linux/pipe_fs_i.h" 1 +# 26 "./include/linux/pipe_fs_i.h" +struct pipe_buffer { + struct page *page; + unsigned int offset, len; + const struct pipe_buf_operations *ops; + unsigned int flags; + unsigned long private; +}; +# 57 "./include/linux/pipe_fs_i.h" +struct pipe_inode_info { + struct mutex mutex; + wait_queue_head_t rd_wait, wr_wait; + unsigned int head; + unsigned int tail; + unsigned int max_usage; + unsigned int ring_size; + + bool note_loss; + + unsigned int nr_accounted; + unsigned int readers; + unsigned int writers; + unsigned int files; + unsigned int r_counter; + unsigned int w_counter; + struct page *tmp_page; + struct fasync_struct *fasync_readers; + struct fasync_struct *fasync_writers; + struct pipe_buffer *bufs; + struct user_struct *user; + + struct watch_queue *watch_queue; + +}; +# 93 "./include/linux/pipe_fs_i.h" +struct pipe_buf_operations { + + + + + + + + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + + + + + + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); +# 117 "./include/linux/pipe_fs_i.h" + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + + + + + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pipe_empty(unsigned int head, unsigned int tail) +{ + return head == tail; +} + + + + + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int pipe_occupancy(unsigned int head, unsigned int tail) +{ + return head - tail; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pipe_full(unsigned int head, unsigned int tail, + unsigned int limit) +{ + return pipe_occupancy(head, tail) >= limit; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int pipe_space_for_user(unsigned int head, unsigned int tail, + struct pipe_inode_info *pipe) +{ + unsigned int p_occupancy, p_space; + + p_occupancy = pipe_occupancy(head, tail); + if (p_occupancy >= pipe->max_usage) + return 0; + p_space = pipe->ring_size - p_occupancy; + if (p_space > pipe->max_usage) + p_space = pipe->max_usage; + return p_space; +} +# 184 "./include/linux/pipe_fs_i.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) bool pipe_buf_get(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return buf->ops->get(pipe, buf); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + const struct pipe_buf_operations *ops = buf->ops; + + buf->ops = ((void *)0); + ops->release(pipe, buf); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pipe_buf_confirm(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + if (!buf->ops->confirm) + return 0; + return buf->ops->confirm(pipe, buf); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pipe_buf_try_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + if (!buf->ops->try_steal) + return false; + return buf->ops->try_steal(pipe, buf); +} + + + + + + +void pipe_lock(struct pipe_inode_info *); +void pipe_unlock(struct pipe_inode_info *); +void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); + +extern unsigned int pipe_max_size; +extern unsigned long pipe_user_pages_hard; +extern unsigned long pipe_user_pages_soft; + + +void pipe_wait(struct pipe_inode_info *pipe); + +struct pipe_inode_info *alloc_pipe_info(void); +void free_pipe_info(struct pipe_inode_info *); + + +bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); +bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *); +void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); + +extern const struct pipe_buf_operations nosteal_pipe_buf_ops; + + +unsigned long account_pipe_buffers(struct user_struct *user, + unsigned long old, unsigned long new); +bool too_many_pipe_buffers_soft(unsigned long user_bufs); +bool too_many_pipe_buffers_hard(unsigned long user_bufs); +bool pipe_is_unprivileged_user(void); + + + + +int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots); + +long pipe_fcntl(struct file *, unsigned int, unsigned long arg); +struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice); + +int create_pipe_files(struct file **, int); +unsigned int round_pipe_size(unsigned long size); +# 13 "./include/linux/splice.h" 2 +# 29 "./include/linux/splice.h" +struct 
splice_desc { + size_t total_len; + unsigned int len; + unsigned int flags; + + + + union { + void *userptr; + struct file *file; + void *data; + } u; + loff_t pos; + loff_t *opos; + size_t num_spliced; + bool need_wakeup; +}; + +struct partial_page { + unsigned int offset; + unsigned int len; + unsigned long private; +}; + + + + +struct splice_pipe_desc { + struct page **pages; + struct partial_page *partial; + int nr_pages; + unsigned int nr_pages_max; + const struct pipe_buf_operations *ops; + void (*spd_release)(struct splice_pipe_desc *, unsigned int); +}; + +typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *, + struct splice_desc *); +typedef int (splice_direct_actor)(struct pipe_inode_info *, + struct splice_desc *); + +extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *, + loff_t *, size_t, unsigned int, + splice_actor *); +extern ssize_t __splice_from_pipe(struct pipe_inode_info *, + struct splice_desc *, splice_actor *); +extern ssize_t splice_to_pipe(struct pipe_inode_info *, + struct splice_pipe_desc *); +extern ssize_t add_to_pipe(struct pipe_inode_info *, + struct pipe_buffer *); +extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, + splice_direct_actor *); +extern long do_splice(struct file *in, loff_t *off_in, + struct file *out, loff_t *off_out, + size_t len, unsigned int flags); + +extern long do_tee(struct file *in, struct file *out, size_t len, + unsigned int flags); + + + + +extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *); +extern void splice_shrink_spd(struct splice_pipe_desc *); + +extern const struct pipe_buf_operations page_cache_pipe_buf_ops; +extern const struct pipe_buf_operations default_pipe_buf_ops; +# 37 "./include/linux/skbuff.h" 2 + +# 1 "./include/uapi/linux/if_packet.h" 1 + + + + + + +struct sockaddr_pkt { + unsigned short spkt_family; + unsigned char spkt_device[14]; + __be16 spkt_protocol; +}; + +struct sockaddr_ll { + unsigned short sll_family; + __be16 sll_protocol; + int sll_ifindex; + unsigned short sll_hatype; + unsigned char sll_pkttype; + unsigned char sll_halen; + unsigned char sll_addr[8]; +}; +# 74 "./include/uapi/linux/if_packet.h" +struct tpacket_stats { + unsigned int tp_packets; + unsigned int tp_drops; +}; + +struct tpacket_stats_v3 { + unsigned int tp_packets; + unsigned int tp_drops; + unsigned int tp_freeze_q_cnt; +}; + +struct tpacket_rollover_stats { + __u64 __attribute__((aligned(8))) tp_all; + __u64 __attribute__((aligned(8))) tp_huge; + __u64 __attribute__((aligned(8))) tp_failed; +}; + +union tpacket_stats_u { + struct tpacket_stats stats1; + struct tpacket_stats_v3 stats3; +}; + +struct tpacket_auxdata { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; +}; +# 131 "./include/uapi/linux/if_packet.h" +struct tpacket_hdr { + unsigned long tp_status; + unsigned int tp_len; + unsigned int tp_snaplen; + unsigned short tp_mac; + unsigned short tp_net; + unsigned int tp_sec; + unsigned int tp_usec; +}; + + + + + +struct tpacket2_hdr { + __u32 tp_status; + __u32 tp_len; + __u32 tp_snaplen; + __u16 tp_mac; + __u16 tp_net; + __u32 tp_sec; + __u32 tp_nsec; + __u16 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u8 tp_padding[4]; +}; + +struct tpacket_hdr_variant1 { + __u32 tp_rxhash; + __u32 tp_vlan_tci; + __u16 tp_vlan_tpid; + __u16 tp_padding; +}; + +struct tpacket3_hdr { + __u32 tp_next_offset; + __u32 tp_sec; + __u32 tp_nsec; + __u32 tp_snaplen; + __u32 tp_len; + 
__u32 tp_status; + __u16 tp_mac; + __u16 tp_net; + + union { + struct tpacket_hdr_variant1 hv1; + }; + __u8 tp_padding[8]; +}; + +struct tpacket_bd_ts { + unsigned int ts_sec; + union { + unsigned int ts_usec; + unsigned int ts_nsec; + }; +}; + +struct tpacket_hdr_v1 { + __u32 block_status; + __u32 num_pkts; + __u32 offset_to_first_pkt; + + + + + __u32 blk_len; +# 208 "./include/uapi/linux/if_packet.h" + __u64 __attribute__((aligned(8))) seq_num; +# 235 "./include/uapi/linux/if_packet.h" + struct tpacket_bd_ts ts_first_pkt, ts_last_pkt; +}; + +union tpacket_bd_header_u { + struct tpacket_hdr_v1 bh1; +}; + +struct tpacket_block_desc { + __u32 version; + __u32 offset_to_priv; + union tpacket_bd_header_u hdr; +}; + + + + +enum tpacket_versions { + TPACKET_V1, + TPACKET_V2, + TPACKET_V3 +}; +# 270 "./include/uapi/linux/if_packet.h" +struct tpacket_req { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; +}; + +struct tpacket_req3 { + unsigned int tp_block_size; + unsigned int tp_block_nr; + unsigned int tp_frame_size; + unsigned int tp_frame_nr; + unsigned int tp_retire_blk_tov; + unsigned int tp_sizeof_priv; + unsigned int tp_feature_req_word; +}; + +union tpacket_req_u { + struct tpacket_req req; + struct tpacket_req3 req3; +}; + +struct packet_mreq { + int mr_ifindex; + unsigned short mr_type; + unsigned short mr_alen; + unsigned char mr_address[8]; +}; +# 39 "./include/linux/skbuff.h" 2 +# 1 "./include/net/flow.h" 1 +# 25 "./include/net/flow.h" +struct flowi_tunnel { + __be64 tun_id; +}; + +struct flowi_common { + int flowic_oif; + int flowic_iif; + __u32 flowic_mark; + __u8 flowic_tos; + __u8 flowic_scope; + __u8 flowic_proto; + __u8 flowic_flags; + + + + __u32 flowic_secid; + kuid_t flowic_uid; + struct flowi_tunnel flowic_tun_key; + __u32 flowic_multipath_hash; +}; + +union flowi_uli { + struct { + __be16 dport; + __be16 sport; + } ports; + + struct { + __u8 type; + __u8 code; + } icmpt; + + struct { + __le16 dport; + __le16 sport; + } dnports; + + __be32 spi; + __be32 gre_key; + + struct { + __u8 type; + } mht; +}; + +struct flowi4 { + struct flowi_common __fl_common; +# 85 "./include/net/flow.h" + __be32 saddr; + __be32 daddr; + + union flowi_uli uli; + + + + + + + +} __attribute__((__aligned__(64/8))); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flowi4_init_output(struct flowi4 *fl4, int oif, + __u32 mark, __u8 tos, __u8 scope, + __u8 proto, __u8 flags, + __be32 daddr, __be32 saddr, + __be16 dport, __be16 sport, + kuid_t uid) +{ + fl4->__fl_common.flowic_oif = oif; + fl4->__fl_common.flowic_iif = 1; + fl4->__fl_common.flowic_mark = mark; + fl4->__fl_common.flowic_tos = tos; + fl4->__fl_common.flowic_scope = scope; + fl4->__fl_common.flowic_proto = proto; + fl4->__fl_common.flowic_flags = flags; + fl4->__fl_common.flowic_secid = 0; + fl4->__fl_common.flowic_tun_key.tun_id = 0; + fl4->__fl_common.flowic_uid = uid; + fl4->daddr = daddr; + fl4->saddr = saddr; + fl4->uli.ports.dport = dport; + fl4->uli.ports.sport = sport; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos, + __be32 daddr, __be32 saddr) +{ + fl4->__fl_common.flowic_oif = oif; + fl4->__fl_common.flowic_tos = tos; + fl4->daddr = daddr; + fl4->saddr = saddr; +} + + +struct flowi6 { + struct flowi_common __fl_common; +# 143 "./include/net/flow.h" + 
struct in6_addr daddr; + struct in6_addr saddr; + + __be32 flowlabel; + union flowi_uli uli; + + + + + + + + __u32 mp_hash; +} __attribute__((__aligned__(64/8))); + +struct flowidn { + struct flowi_common __fl_common; + + + + + + + __le16 daddr; + __le16 saddr; + union flowi_uli uli; + + +} __attribute__((__aligned__(64/8))); + +struct flowi { + union { + struct flowi_common __fl_common; + struct flowi4 ip4; + struct flowi6 ip6; + struct flowidn dn; + } u; +# 190 "./include/net/flow.h" +} __attribute__((__aligned__(64/8))); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct flowi *flowi4_to_flowi(struct flowi4 *fl4) +{ + return ({ void *__mptr = (void *)(fl4); do { extern void __compiletime_assert_1171(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(fl4)), typeof(((struct flowi *)0)->u.ip4)) && !__builtin_types_compatible_p(typeof(*(fl4)), typeof(void))))) __compiletime_assert_1171(); } while (0); ((struct flowi *)(__mptr - __builtin_offsetof(struct flowi, u.ip4))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct flowi *flowi6_to_flowi(struct flowi6 *fl6) +{ + return ({ void *__mptr = (void *)(fl6); do { extern void __compiletime_assert_1172(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(fl6)), typeof(((struct flowi *)0)->u.ip6)) && !__builtin_types_compatible_p(typeof(*(fl6)), typeof(void))))) __compiletime_assert_1172(); } while (0); ((struct flowi *)(__mptr - __builtin_offsetof(struct flowi, u.ip6))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct flowi *flowidn_to_flowi(struct flowidn *fldn) +{ + return ({ void *__mptr = (void *)(fldn); do { extern void __compiletime_assert_1173(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(fldn)), typeof(((struct flowi *)0)->u.dn)) && !__builtin_types_compatible_p(typeof(*(fldn)), typeof(void))))) __compiletime_assert_1173(); } while (0); ((struct flowi *)(__mptr - __builtin_offsetof(struct flowi, u.dn))); }); +} + +typedef unsigned long flow_compare_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int flow_key_size(u16 family) +{ + switch (family) { + case 2: + do { extern void __compiletime_assert_1174(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(struct flowi4) % sizeof(flow_compare_t)"))); if (!(!(sizeof(struct flowi4) % sizeof(flow_compare_t)))) __compiletime_assert_1174(); } while (0); + return sizeof(struct flowi4) / sizeof(flow_compare_t); + case 10: + do { extern void __compiletime_assert_1175(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(struct flowi6) % sizeof(flow_compare_t)"))); if (!(!(sizeof(struct flowi6) % sizeof(flow_compare_t)))) __compiletime_assert_1175(); } while (0); + return sizeof(struct flowi6) / sizeof(flow_compare_t); + case 12: + do { extern void __compiletime_assert_1176(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(struct flowidn) % sizeof(flow_compare_t)"))); if (!(!(sizeof(struct flowidn) % sizeof(flow_compare_t)))) __compiletime_assert_1176(); } while (0); + return sizeof(struct flowidn) / sizeof(flow_compare_t); + } + return 
0; +} + +__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys); +# 40 "./include/linux/skbuff.h" 2 + +# 1 "./include/linux/netfilter/nf_conntrack_common.h" 1 + + + + + +# 1 "./include/uapi/linux/netfilter/nf_conntrack_common.h" 1 + + + + + + +enum ip_conntrack_info { + + IP_CT_ESTABLISHED, + + + + IP_CT_RELATED, + + + + IP_CT_NEW, + + + IP_CT_IS_REPLY, + + IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY, + IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY, + + + + IP_CT_NUMBER, + + + + + + IP_CT_UNTRACKED = 7, + +}; + + + + + + +enum ip_conntrack_status { + + IPS_EXPECTED_BIT = 0, + IPS_EXPECTED = (1 << IPS_EXPECTED_BIT), + + + IPS_SEEN_REPLY_BIT = 1, + IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT), + + + IPS_ASSURED_BIT = 2, + IPS_ASSURED = (1 << IPS_ASSURED_BIT), + + + IPS_CONFIRMED_BIT = 3, + IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT), + + + IPS_SRC_NAT_BIT = 4, + IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT), + + + IPS_DST_NAT_BIT = 5, + IPS_DST_NAT = (1 << IPS_DST_NAT_BIT), + + + IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT), + + + IPS_SEQ_ADJUST_BIT = 6, + IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT), + + + IPS_SRC_NAT_DONE_BIT = 7, + IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT), + + IPS_DST_NAT_DONE_BIT = 8, + IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT), + + + IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE), + + + IPS_DYING_BIT = 9, + IPS_DYING = (1 << IPS_DYING_BIT), + + + IPS_FIXED_TIMEOUT_BIT = 10, + IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT), + + + IPS_TEMPLATE_BIT = 11, + IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT), + + + IPS_UNTRACKED_BIT = 12, + IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT), + + + + + + + IPS_NAT_CLASH_BIT = IPS_UNTRACKED_BIT, + IPS_NAT_CLASH = IPS_UNTRACKED, + + + + IPS_HELPER_BIT = 13, + IPS_HELPER = (1 << IPS_HELPER_BIT), + + + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT), + + + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = (1 << IPS_HW_OFFLOAD_BIT), + + + + + IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK | + IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | + IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_UNTRACKED | + IPS_OFFLOAD | IPS_HW_OFFLOAD), + + __IPS_MAX_BIT = 16, +}; + + +enum ip_conntrack_events { + IPCT_NEW, + IPCT_RELATED, + IPCT_DESTROY, + IPCT_REPLY, + IPCT_ASSURED, + IPCT_PROTOINFO, + IPCT_HELPER, + IPCT_MARK, + IPCT_SEQADJ, + IPCT_NATSEQADJ = IPCT_SEQADJ, + IPCT_SECMARK, + IPCT_LABEL, + IPCT_SYNPROXY, + + __IPCT_MAX + +}; + +enum ip_conntrack_expect_events { + IPEXP_NEW, + IPEXP_DESTROY, +}; +# 7 "./include/linux/netfilter/nf_conntrack_common.h" 2 + +struct ip_conntrack_stat { + unsigned int found; + unsigned int invalid; + unsigned int ignore; + unsigned int insert; + unsigned int insert_failed; + unsigned int drop; + unsigned int early_drop; + unsigned int error; + unsigned int expect_new; + unsigned int expect_create; + unsigned int expect_delete; + unsigned int search_restart; +}; + + + + +struct nf_conntrack { + atomic_t use; +}; + +void nf_conntrack_destroy(struct nf_conntrack *nfct); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nf_conntrack_put(struct nf_conntrack *nfct) +{ + if (nfct && atomic_dec_and_test(&nfct->use)) + nf_conntrack_destroy(nfct); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nf_conntrack_get(struct nf_conntrack *nfct) +{ + if (nfct) + atomic_inc(&nfct->use); +} +# 42 "./include/linux/skbuff.h" 2 +# 241 
"./include/linux/skbuff.h" +struct net_device; +struct scatterlist; +struct pipe_inode_info; +struct iov_iter; +struct napi_struct; +struct bpf_prog; +union bpf_attr; +struct skb_ext; + + +struct nf_bridge_info { + enum { + BRNF_PROTO_UNCHANGED, + BRNF_PROTO_8021Q, + BRNF_PROTO_PPPOE + } orig_proto:8; + u8 pkt_otherhost:1; + u8 in_prerouting:1; + u8 bridged_dnat:1; + __u16 frag_max_size; + struct net_device *physindev; + + + struct net_device *physoutdev; + union { + + __be32 ipv4_daddr; + struct in6_addr ipv6_daddr; + + + + + + char neigh_header[8]; + }; +}; + + + + + + + +struct tc_skb_ext { + __u32 chain; +}; + + +struct sk_buff_head { + + struct sk_buff *next; + struct sk_buff *prev; + + __u32 qlen; + spinlock_t lock; +}; + +struct sk_buff; +# 312 "./include/linux/skbuff.h" +extern int sysctl_max_skb_frags; + + + + + + +typedef struct bio_vec skb_frag_t; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_frag_size(const skb_frag_t *frag) +{ + return frag->bv_len; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_size_set(skb_frag_t *frag, unsigned int size) +{ + frag->bv_len = size; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_size_add(skb_frag_t *frag, int delta) +{ + frag->bv_len += delta; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->bv_len -= delta; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_frag_must_loop(struct page *p) +{ + + + + + return false; +} +# 416 "./include/linux/skbuff.h" +struct skb_shared_hwtstamps { + ktime_t hwtstamp; +}; + + +enum { + + SKBTX_HW_TSTAMP = 1 << 0, + + + SKBTX_SW_TSTAMP = 1 << 1, + + + SKBTX_IN_PROGRESS = 1 << 2, + + + SKBTX_DEV_ZEROCOPY = 1 << 3, + + + SKBTX_WIFI_STATUS = 1 << 4, + + + + + + + SKBTX_SHARED_FRAG = 1 << 5, + + + SKBTX_SCHED_TSTAMP = 1 << 6, +}; +# 461 "./include/linux/skbuff.h" +struct ubuf_info { + void (*callback)(struct ubuf_info *, bool zerocopy_success); + union { + struct { + unsigned long desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy:1; + u32 bytelen; + }; + }; + refcount_t refcnt; + + struct mmpin { + struct user_struct *user; + unsigned int num_pg; + } mmp; +}; + + + +int mm_account_pinned_pages(struct mmpin *mmp, size_t size); +void mm_unaccount_pinned_pages(struct mmpin *mmp); + +struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size); +struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, + struct ubuf_info *uarg); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_zerocopy_get(struct ubuf_info *uarg) +{ + refcount_inc(&uarg->refcnt); +} + +void sock_zerocopy_put(struct ubuf_info *uarg); +void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); + +void sock_zerocopy_callback(struct ubuf_info *uarg, bool success); + +int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len); +int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, + struct msghdr *msg, int len, + struct ubuf_info *uarg); + + + + +struct skb_shared_info { + __u8 __unused; + 
__u8 meta_len; + __u8 nr_frags; + __u8 tx_flags; + unsigned short gso_size; + + unsigned short gso_segs; + struct sk_buff *frag_list; + struct skb_shared_hwtstamps hwtstamps; + unsigned int gso_type; + u32 tskey; + + + + + atomic_t dataref; + + + + void * destructor_arg; + + + skb_frag_t frags[(65536/((1UL) << 12) + 1)]; +}; +# 551 "./include/linux/skbuff.h" +enum { + SKB_FCLONE_UNAVAILABLE, + SKB_FCLONE_ORIG, + SKB_FCLONE_CLONE, +}; + +enum { + SKB_GSO_TCPV4 = 1 << 0, + + + SKB_GSO_DODGY = 1 << 1, + + + SKB_GSO_TCP_ECN = 1 << 2, + + SKB_GSO_TCP_FIXEDID = 1 << 3, + + SKB_GSO_TCPV6 = 1 << 4, + + SKB_GSO_FCOE = 1 << 5, + + SKB_GSO_GRE = 1 << 6, + + SKB_GSO_GRE_CSUM = 1 << 7, + + SKB_GSO_IPXIP4 = 1 << 8, + + SKB_GSO_IPXIP6 = 1 << 9, + + SKB_GSO_UDP_TUNNEL = 1 << 10, + + SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, + + SKB_GSO_PARTIAL = 1 << 12, + + SKB_GSO_TUNNEL_REMCSUM = 1 << 13, + + SKB_GSO_SCTP = 1 << 14, + + SKB_GSO_ESP = 1 << 15, + + SKB_GSO_UDP = 1 << 16, + + SKB_GSO_UDP_L4 = 1 << 17, + + SKB_GSO_FRAGLIST = 1 << 18, +}; + + + + + + +typedef unsigned int sk_buff_data_t; +# 711 "./include/linux/skbuff.h" +struct sk_buff { + union { + struct { + + struct sk_buff *next; + struct sk_buff *prev; + + union { + struct net_device *dev; + + + + + unsigned long dev_scratch; + }; + }; + struct rb_node rbnode; + struct list_head list; + }; + + union { + struct sock *sk; + int ip_defrag_offset; + }; + + union { + ktime_t tstamp; + u64 skb_mstamp_ns; + }; + + + + + + + char cb[48] __attribute__((__aligned__(8))); + + union { + struct { + unsigned long _skb_refdst; + void (*destructor)(struct sk_buff *skb); + }; + struct list_head tcp_tsorted_anchor; + }; + + + unsigned long _nfct; + + unsigned int len, + data_len; + __u16 mac_len, + hdr_len; + + + + + __u16 queue_mapping; +# 778 "./include/linux/skbuff.h" + __u8 __cloned_offset[0]; + + __u8 cloned:1, + nohdr:1, + fclone:2, + peeked:1, + head_frag:1, + pfmemalloc:1; + + __u8 active_extensions; + + + + + + __u32 headers_start[0]; +# 805 "./include/linux/skbuff.h" + __u8 __pkt_type_offset[0]; + + __u8 pkt_type:3; + __u8 ignore_df:1; + __u8 nf_trace:1; + __u8 ip_summed:2; + __u8 ooo_okay:1; + + __u8 l4_hash:1; + __u8 sw_hash:1; + __u8 wifi_acked_valid:1; + __u8 wifi_acked:1; + __u8 no_fcs:1; + + __u8 encapsulation:1; + __u8 encap_hdr_csum:1; + __u8 csum_valid:1; +# 830 "./include/linux/skbuff.h" + __u8 __pkt_vlan_present_offset[0]; + + __u8 vlan_present:1; + __u8 csum_complete_sw:1; + __u8 csum_level:2; + __u8 csum_not_inet:1; + __u8 dst_pending_confirm:1; + + __u8 ndisc_nodetype:2; + + + __u8 ipvs_property:1; + __u8 inner_protocol_type:1; + __u8 remcsum_offload:1; + + __u8 offload_fwd_mark:1; + __u8 offload_l3_fwd_mark:1; + + + __u8 tc_skip_classify:1; + __u8 tc_at_ingress:1; + + + __u8 redirected:1; + __u8 from_ingress:1; + + + __u8 decrypted:1; + + + + __u16 tc_index; + + + union { + __wsum csum; + struct { + __u16 csum_start; + __u16 csum_offset; + }; + }; + __u32 priority; + int skb_iif; + __u32 hash; + __be16 vlan_proto; + __u16 vlan_tci; + + union { + unsigned int napi_id; + unsigned int sender_cpu; + }; + + + __u32 secmark; + + + union { + __u32 mark; + __u32 reserved_tailroom; + }; + + union { + __be16 inner_protocol; + __u8 inner_ipproto; + }; + + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + + __be16 protocol; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; + + + __u32 headers_end[0]; + + + + sk_buff_data_t tail; + sk_buff_data_t end; + unsigned char *head, + *data; + unsigned int truesize; 
+ refcount_t users; + + + + struct skb_ext *extensions; + +}; +# 936 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_pfmemalloc(const struct sk_buff *skb) +{ + return __builtin_expect(!!(skb->pfmemalloc), 0); +} +# 954 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dst_entry *skb_dst(const struct sk_buff *skb) +{ + + + + ({ int __ret_warn_on = !!((skb->_skb_refdst & 1UL) && !rcu_read_lock_held() && !rcu_read_lock_bh_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1177)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (959), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1178)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1179)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }) + + ; + return (struct dst_entry *)(skb->_skb_refdst & ~(1UL)); +} +# 973 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) +{ + skb->_skb_refdst = (unsigned long)dst; +} +# 988 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) +{ + ({ int __ret_warn_on = !!(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1180)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (990), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1181)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1182)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + skb->_skb_refdst = (unsigned long)dst | 1UL; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_dst_is_noref(const struct sk_buff *skb) +{ + return (skb->_skb_refdst & 1UL) && skb_dst(skb); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rtable *skb_rtable(const struct sk_buff *skb) +{ + return 
(struct rtable *)skb_dst(skb); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_pkt_type_ok(u32 ptype) +{ + return ptype <= 3; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_napi_id(const struct sk_buff *skb) +{ + + return skb->napi_id; + + + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_unref(struct sk_buff *skb) +{ + if (__builtin_expect(!!(!skb), 0)) + return false; + if (__builtin_expect(!!(refcount_read(&skb->users) == 1), 1)) + __asm__ __volatile__("": : :"memory"); + else if (__builtin_expect(!!(!refcount_dec_and_test(&skb->users)), 1)) + return false; + + return true; +} + +void skb_release_head_state(struct sk_buff *skb); +void kfree_skb(struct sk_buff *skb); +void kfree_skb_list(struct sk_buff *segs); +void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); +void skb_tx_error(struct sk_buff *skb); +void consume_skb(struct sk_buff *skb); +void __consume_stateless_skb(struct sk_buff *skb); +void __kfree_skb(struct sk_buff *skb); +extern struct kmem_cache *skbuff_head_cache; + +void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); +bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, + bool *fragstolen, int *delta_truesize); + +struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, + int node); +struct sk_buff *__build_skb(void *data, unsigned int frag_size); +struct sk_buff *build_skb(void *data, unsigned int frag_size); +struct sk_buff *build_skb_around(struct sk_buff *skb, + void *data, unsigned int frag_size); +# 1080 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *alloc_skb(unsigned int size, + gfp_t priority) +{ + return __alloc_skb(size, priority, 0, (-1)); +} + +struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + unsigned long data_len, + int max_page_order, + int *errcode, + gfp_t gfp_mask); +struct sk_buff *alloc_skb_for_msg(struct sk_buff *first); + + +struct sk_buff_fclones { + struct sk_buff skb1; + + struct sk_buff skb2; + + refcount_t fclone_ref; +}; +# 1111 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_fclone_busy(const struct sock *sk, + const struct sk_buff *skb) +{ + const struct sk_buff_fclones *fclones; + + fclones = ({ void *__mptr = (void *)(skb); do { extern void __compiletime_assert_1183(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(skb)), typeof(((struct sk_buff_fclones *)0)->skb1)) && !__builtin_types_compatible_p(typeof(*(skb)), typeof(void))))) __compiletime_assert_1183(); } while (0); ((struct sk_buff_fclones *)(__mptr - __builtin_offsetof(struct sk_buff_fclones, skb1))); }); + + return skb->fclone == SKB_FCLONE_ORIG && + refcount_read(&fclones->fclone_ref) > 1 && + fclones->skb2.sk == sk; +} +# 1130 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *alloc_skb_fclone(unsigned int size, + gfp_t priority) +{ + return __alloc_skb(size, priority, 0x01, (-1)); +} + +struct sk_buff *skb_morph(struct sk_buff *dst, struct 
sk_buff *src); +void skb_headers_offset_update(struct sk_buff *skb, int off); +int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); +struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); +void skb_copy_header(struct sk_buff *new, const struct sk_buff *old); +struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); +struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, + gfp_t gfp_mask, bool fclone); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, + gfp_t gfp_mask) +{ + return __pskb_copy_fclone(skb, headroom, gfp_mask, false); +} + +int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); +struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, + unsigned int headroom); +struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, + int newtailroom, gfp_t priority); +int __attribute__((__warn_unused_result__)) skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int __attribute__((__warn_unused_result__)) skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); +int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error); +# 1173 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_pad(struct sk_buff *skb, int pad) +{ + return __skb_pad(skb, pad, true); +} + + +int skb_append_pagefrags(struct sk_buff *skb, struct page *page, + int offset, size_t size); + +struct skb_seq_state { + __u32 lower_offset; + __u32 upper_offset; + __u32 frag_idx; + __u32 stepped_offset; + struct sk_buff *root_skb; + struct sk_buff *cur_skb; + __u8 *frag_data; +}; + +void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, + unsigned int to, struct skb_seq_state *st); +unsigned int skb_seq_read(unsigned int consumed, const u8 **data, + struct skb_seq_state *st); +void skb_abort_seq_read(struct skb_seq_state *st); + +unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, + unsigned int to, struct ts_config *config); +# 1227 "./include/linux/skbuff.h" +enum pkt_hash_types { + PKT_HASH_TYPE_NONE, + PKT_HASH_TYPE_L2, + PKT_HASH_TYPE_L3, + PKT_HASH_TYPE_L4, +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_clear_hash(struct sk_buff *skb) +{ + skb->hash = 0; + skb->sw_hash = 0; + skb->l4_hash = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_clear_hash_if_not_l4(struct sk_buff *skb) +{ + if (!skb->l4_hash) + skb_clear_hash(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4) +{ + skb->l4_hash = is_l4; + skb->sw_hash = is_sw; + skb->hash = hash; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) +{ + + __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
+__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) +{ + __skb_set_hash(skb, hash, true, is_l4); +} + +void __skb_get_hash(struct sk_buff *skb); +u32 __skb_get_hash_symmetric(const struct sk_buff *skb); +u32 skb_get_poff(const struct sk_buff *skb); +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys_basic *keys, int hlen); +__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + void *data, int hlen_proto); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __be32 skb_flow_get_ports(const struct sk_buff *skb, + int thoff, u8 ip_proto) +{ + return __skb_flow_get_ports(skb, thoff, ip_proto, ((void *)0), 0); +} + +void skb_flow_dissector_init(struct flow_dissector *flow_dissector, + const struct flow_dissector_key *key, + unsigned int key_count); + +struct bpf_flow_dissector; +bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, + __be16 proto, int nhoff, int hlen, unsigned int flags); + +bool __skb_flow_dissect(const struct net *net, + const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, + void *data, __be16 proto, int nhoff, int hlen, + unsigned int flags); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_flow_dissect(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, unsigned int flags) +{ + return __skb_flow_dissect(((void *)0), skb, flow_dissector, + target_container, ((void *)0), 0, 0, 0, flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int flags) +{ + memset(flow, 0, sizeof(*flow)); + return __skb_flow_dissect(((void *)0), skb, &flow_keys_dissector, + flow, ((void *)0), 0, 0, 0, flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +skb_flow_dissect_flow_keys_basic(const struct net *net, + const struct sk_buff *skb, + struct flow_keys_basic *flow, void *data, + __be16 proto, int nhoff, int hlen, + unsigned int flags) +{ + memset(flow, 0, sizeof(*flow)); + return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow, + data, proto, nhoff, hlen, flags); +} + +void skb_flow_dissect_meta(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + + + + + +void +skb_flow_dissect_ct(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, + u16 *ctinfo_map, + size_t mapsize); +void +skb_flow_dissect_tunnel_info(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 skb_get_hash(struct sk_buff *skb) +{ + if (!skb->l4_hash && !skb->sw_hash) + __skb_get_hash(skb); + + return skb->hash; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) +{ + if (!skb->l4_hash && !skb->sw_hash) { + struct flow_keys keys; + __u32 hash = __get_hash_from_flowi6(fl6, &keys); + + __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); + } + + return skb->hash; +} + +__u32 
skb_get_hash_perturb(const struct sk_buff *skb, + const siphash_key_t *perturb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ + return skb->hash; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) +{ + to->hash = from->hash; + to->sw_hash = from->sw_hash; + to->l4_hash = from->l4_hash; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_copy_decrypted(struct sk_buff *to, + const struct sk_buff *from) +{ + + to->decrypted = from->decrypted; + +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char *skb_end_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->end; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_end_offset(const struct sk_buff *skb) +{ + return skb->end; +} +# 1413 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) +{ + return &((struct skb_shared_info *)(skb_end_pointer(skb)))->hwtstamps; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct ubuf_info *skb_zcopy(struct sk_buff *skb) +{ + bool is_zcopy = skb && ((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags & SKBTX_DEV_ZEROCOPY; + + return is_zcopy ? 
((struct ubuf_info *)(((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg)) : ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, + bool *have_ref) +{ + if (skb && uarg && !skb_zcopy(skb)) { + if (__builtin_expect(!!(have_ref && *have_ref), 0)) + *have_ref = false; + else + sock_zerocopy_get(uarg); + ((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg = uarg; + ((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags |= (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) +{ + ((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg = (void *)((uintptr_t) val | 0x1UL); + ((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags |= (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_zcopy_is_nouarg(struct sk_buff *skb) +{ + return (uintptr_t) ((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg & 0x1UL; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_zcopy_get_nouarg(struct sk_buff *skb) +{ + return (void *)((uintptr_t) ((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg & ~0x1UL); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) +{ + struct ubuf_info *uarg = skb_zcopy(skb); + + if (uarg) { + if (skb_zcopy_is_nouarg(skb)) { + + } else if (uarg->callback == sock_zerocopy_callback) { + uarg->zerocopy = uarg->zerocopy && zerocopy; + sock_zerocopy_put(uarg); + } else { + uarg->callback(uarg, zerocopy); + } + + ((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags &= ~(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG); + } +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_zcopy_abort(struct sk_buff *skb) +{ + struct ubuf_info *uarg = skb_zcopy(skb); + + if (uarg) { + sock_zerocopy_put_abort(uarg, false); + ((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags &= ~(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_mark_not_on_list(struct sk_buff *skb) +{ + skb->next = ((void *)0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_list_del_init(struct sk_buff *skb) +{ + __list_del_entry(&skb->list); + skb_mark_not_on_list(skb); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_queue_empty(const struct sk_buff_head *list) +{ + return list->next == (const struct sk_buff *) list; +} +# 1518 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_queue_empty_lockless(const struct sk_buff_head *list) +{ + return ({ do { extern void __compiletime_assert_1184(void) __attribute__((__error__("Unsupported access size for 
{READ,WRITE}_ONCE()."))); if (!((sizeof(list->next) == sizeof(char) || sizeof(list->next) == sizeof(short) || sizeof(list->next) == sizeof(int) || sizeof(list->next) == sizeof(long)) || sizeof(list->next) == sizeof(long long))) __compiletime_assert_1184(); } while (0); ({ typeof( _Generic((list->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list->next))) __x = (*(const volatile typeof( _Generic((list->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list->next))) *)&(list->next)); do { } while (0); (typeof(list->next))__x; }); }) == (const struct sk_buff *) list; +} +# 1531 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_queue_is_last(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return skb->next == (const struct sk_buff *) list; +} +# 1544 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_queue_is_first(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return skb->prev == (const struct sk_buff *) list; +} +# 1558 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + + + + do { if (__builtin_expect(!!(skb_queue_is_last(list, skb)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1185)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (1564), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1186)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + return skb->next; +} +# 1576 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + + + + do { if (__builtin_expect(!!(skb_queue_is_first(list, skb)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1187)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# 
bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (1582), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1188)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + return skb->prev; +} +# 1593 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *skb_get(struct sk_buff *skb) +{ + refcount_inc(&skb->users); + return skb; +} +# 1611 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_cloned(const struct sk_buff *skb) +{ + return skb->cloned && + (atomic_read(&((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref) & ((1 << 16) - 1)) != 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_unclone(struct sk_buff *skb, gfp_t pri) +{ + do { if (gfpflags_allow_blocking(pri)) do { __might_sleep("include/linux/skbuff.h", 1619, 0); do { } while (0); } while (0); } while (0); + + if (skb_cloned(skb)) + return pskb_expand_head(skb, 0, 0, pri); + + return 0; +} +# 1634 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_header_cloned(const struct sk_buff *skb) +{ + int dataref; + + if (!skb->cloned) + return 0; + + dataref = atomic_read(&((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref); + dataref = (dataref & ((1 << 16) - 1)) - (dataref >> 16); + return dataref != 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_header_unclone(struct sk_buff *skb, gfp_t pri) +{ + do { if (gfpflags_allow_blocking(pri)) do { __might_sleep("include/linux/skbuff.h", 1648, 0); do { } while (0); } while (0); } while (0); + + if (skb_header_cloned(skb)) + return pskb_expand_head(skb, 0, 0, pri); + + return 0; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_header_release(struct sk_buff *skb) +{ + skb->nohdr = 1; + atomic_set(&((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref, 1 + (1 << 16)); +} +# 1674 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_shared(const struct sk_buff *skb) +{ + return refcount_read(&skb->users) != 1; +} +# 1692 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) +{ + do { if (gfpflags_allow_blocking(pri)) do { __might_sleep("include/linux/skbuff.h", 1694, 0); do { } while (0); } while (0); } while (0); + if (skb_shared(skb)) { + struct sk_buff *nskb = skb_clone(skb, pri); + + if (__builtin_expect(!!(nskb), 1)) + consume_skb(skb); + else + kfree_skb(skb); + skb = nskb; + } + return skb; +} +# 1727 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *skb_unshare(struct sk_buff *skb, + gfp_t pri) +{ + do { if (gfpflags_allow_blocking(pri)) 
do { __might_sleep("include/linux/skbuff.h", 1730, 0); do { } while (0); } while (0); } while (0); + if (skb_cloned(skb)) { + struct sk_buff *nskb = skb_copy(skb, pri); + + + if (__builtin_expect(!!(nskb), 1)) + consume_skb(skb); + else + kfree_skb(skb); + skb = nskb; + } + return skb; +} +# 1757 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *skb_peek(const struct sk_buff_head *list_) +{ + struct sk_buff *skb = list_->next; + + if (skb == (struct sk_buff *)list_) + skb = ((void *)0); + return skb; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *__skb_peek(const struct sk_buff_head *list_) +{ + return list_->next; +} +# 1786 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *skb_peek_next(struct sk_buff *skb, + const struct sk_buff_head *list_) +{ + struct sk_buff *next = skb->next; + + if (next == (struct sk_buff *)list_) + next = ((void *)0); + return next; +} +# 1809 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) +{ + struct sk_buff *skb = ({ do { extern void __compiletime_assert_1189(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list_->prev) == sizeof(char) || sizeof(list_->prev) == sizeof(short) || sizeof(list_->prev) == sizeof(int) || sizeof(list_->prev) == sizeof(long)) || sizeof(list_->prev) == sizeof(long long))) __compiletime_assert_1189(); } while (0); ({ typeof( _Generic((list_->prev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list_->prev))) __x = (*(const volatile typeof( _Generic((list_->prev), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list_->prev))) *)&(list_->prev)); do { } while (0); (typeof(list_->prev))__x; }); }); + + if (skb == (struct sk_buff *)list_) + skb = ((void *)0); + return skb; + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 skb_queue_len(const struct sk_buff_head *list_) +{ + return list_->qlen; +} +# 1837 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) +{ + return ({ do { extern void __compiletime_assert_1190(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list_->qlen) == sizeof(char) || sizeof(list_->qlen) == sizeof(short) || sizeof(list_->qlen) == sizeof(int) || sizeof(list_->qlen) == sizeof(long)) || sizeof(list_->qlen) == 
sizeof(long long))) __compiletime_assert_1190(); } while (0); ({ typeof( _Generic((list_->qlen), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list_->qlen))) __x = (*(const volatile typeof( _Generic((list_->qlen), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (list_->qlen))) *)&(list_->qlen)); do { } while (0); (typeof(list_->qlen))__x; }); }); +} +# 1852 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +# 1866 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_queue_head_init(struct sk_buff_head *list) +{ + do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&list->lock), "&list->lock", &__key, LD_WAIT_CONFIG); } while (0); + __skb_queue_head_init(list); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) +{ + skb_queue_head_init(list); + lockdep_init_map_waits(&(&list->lock)->dep_map, "class", class, 0, (&list->lock)->dep_map.wait_type_inner, (&list->lock)->dep_map.wait_type_outer); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_insert(struct sk_buff *newsk, + struct sk_buff *prev, struct sk_buff *next, + struct sk_buff_head *list) +{ + + + + do { do { extern void __compiletime_assert_1191(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(newsk->next) == sizeof(char) || sizeof(newsk->next) == sizeof(short) || sizeof(newsk->next) == sizeof(int) || sizeof(newsk->next) == sizeof(long)) || sizeof(newsk->next) == sizeof(long long))) __compiletime_assert_1191(); } while (0); do { *(volatile typeof(newsk->next) *)&(newsk->next) = (next); } while (0); } while (0); + do { do { extern void __compiletime_assert_1192(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(newsk->prev) == sizeof(char) || sizeof(newsk->prev) == sizeof(short) || sizeof(newsk->prev) == sizeof(int) || sizeof(newsk->prev) == sizeof(long)) || sizeof(newsk->prev) == sizeof(long long))) __compiletime_assert_1192(); } while (0); do { *(volatile typeof(newsk->prev) *)&(newsk->prev) = (prev); } while (0); } while (0); + do { do { extern void __compiletime_assert_1193(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(next->prev) == sizeof(char) || sizeof(next->prev) == sizeof(short) || sizeof(next->prev) == sizeof(int) || sizeof(next->prev) == sizeof(long)) || sizeof(next->prev) == 
sizeof(long long))) __compiletime_assert_1193(); } while (0); do { *(volatile typeof(next->prev) *)&(next->prev) = (newsk); } while (0); } while (0); + do { do { extern void __compiletime_assert_1194(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_1194(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (newsk); } while (0); } while (0); + list->qlen++; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *next) +{ + struct sk_buff *first = list->next; + struct sk_buff *last = list->prev; + + do { do { extern void __compiletime_assert_1195(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(first->prev) == sizeof(char) || sizeof(first->prev) == sizeof(short) || sizeof(first->prev) == sizeof(int) || sizeof(first->prev) == sizeof(long)) || sizeof(first->prev) == sizeof(long long))) __compiletime_assert_1195(); } while (0); do { *(volatile typeof(first->prev) *)&(first->prev) = (prev); } while (0); } while (0); + do { do { extern void __compiletime_assert_1196(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_1196(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (first); } while (0); } while (0); + + do { do { extern void __compiletime_assert_1197(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(last->next) == sizeof(char) || sizeof(last->next) == sizeof(short) || sizeof(last->next) == sizeof(int) || sizeof(last->next) == sizeof(long)) || sizeof(last->next) == sizeof(long long))) __compiletime_assert_1197(); } while (0); do { *(volatile typeof(last->next) *)&(last->next) = (next); } while (0); } while (0); + do { do { extern void __compiletime_assert_1198(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(next->prev) == sizeof(char) || sizeof(next->prev) == sizeof(short) || sizeof(next->prev) == sizeof(int) || sizeof(next->prev) == sizeof(long)) || sizeof(next->prev) == sizeof(long long))) __compiletime_assert_1198(); } while (0); do { *(volatile typeof(next->prev) *)&(next->prev) = (last); } while (0); } while (0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + } +} +# 1934 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_queue_splice_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + 
__skb_queue_head_init(list); + } +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_queue_splice_tail(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + } +} +# 1966 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_queue_splice_tail_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} +# 1987 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_queue_after(struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *newsk) +{ + __skb_insert(newsk, prev, prev->next, list); +} + +void skb_append(struct sk_buff *old, struct sk_buff *newsk, + struct sk_buff_head *list); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_queue_before(struct sk_buff_head *list, + struct sk_buff *next, + struct sk_buff *newsk) +{ + __skb_insert(newsk, next->prev, next, list); +} +# 2014 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_queue_head(struct sk_buff_head *list, + struct sk_buff *newsk) +{ + __skb_queue_after(list, (struct sk_buff *)list, newsk); +} +void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); +# 2031 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_queue_tail(struct sk_buff_head *list, + struct sk_buff *newsk) +{ + __skb_queue_before(list, (struct sk_buff *)list, newsk); +} +void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); + + + + + +void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) +{ + struct sk_buff *next, *prev; + + do { do { extern void __compiletime_assert_1199(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list->qlen) == sizeof(char) || sizeof(list->qlen) == sizeof(short) || sizeof(list->qlen) == sizeof(int) || sizeof(list->qlen) == sizeof(long)) || sizeof(list->qlen) == sizeof(long long))) __compiletime_assert_1199(); } while (0); do { *(volatile typeof(list->qlen) *)&(list->qlen) = (list->qlen - 1); } while (0); } while (0); + next = skb->next; + prev = skb->prev; + skb->next = skb->prev = ((void *)0); + do { do { extern void __compiletime_assert_1200(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(next->prev) == sizeof(char) || sizeof(next->prev) == sizeof(short) || sizeof(next->prev) == sizeof(int) || sizeof(next->prev) == sizeof(long)) || sizeof(next->prev) == sizeof(long long))) __compiletime_assert_1200(); } while (0); do { *(volatile typeof(next->prev) *)&(next->prev) = (prev); } while (0); } while (0); + do { do { extern void __compiletime_assert_1201(void) 
__attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(prev->next) == sizeof(char) || sizeof(prev->next) == sizeof(short) || sizeof(prev->next) == sizeof(int) || sizeof(prev->next) == sizeof(long)) || sizeof(prev->next) == sizeof(long long))) __compiletime_assert_1201(); } while (0); do { *(volatile typeof(prev->next) *)&(prev->next) = (next); } while (0); } while (0); +} +# 2063 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *__skb_dequeue(struct sk_buff_head *list) +{ + struct sk_buff *skb = skb_peek(list); + if (skb) + __skb_unlink(skb, list); + return skb; +} +struct sk_buff *skb_dequeue(struct sk_buff_head *list); +# 2080 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) +{ + struct sk_buff *skb = skb_peek_tail(list); + if (skb) + __skb_unlink(skb, list); + return skb; +} +struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_is_nonlinear(const struct sk_buff *skb) +{ + return skb->data_len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_headlen(const struct sk_buff *skb) +{ + return skb->len - skb->data_len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __skb_pagelen(const struct sk_buff *skb) +{ + unsigned int i, len = 0; + + for (i = ((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags - 1; (int)i >= 0; i--) + len += skb_frag_size(&((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i]); + return len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_pagelen(const struct sk_buff *skb) +{ + return skb_headlen(skb) + __skb_pagelen(skb); +} +# 2127 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_fill_page_desc(struct sk_buff *skb, int i, + struct page *page, int off, int size) +{ + skb_frag_t *frag = &((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i]; + + + + + + + frag->bv_page = page; + frag->bv_offset = off; + skb_frag_size_set(frag, size); + + page = compound_head(page); + if (page_is_pfmemalloc(page)) + skb->pfmemalloc = true; +} +# 2160 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_fill_page_desc(struct sk_buff *skb, int i, + struct page *page, int off, int size) +{ + __skb_fill_page_desc(skb, i, page, off, size); + ((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags = i + 1; +} + +void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, + int size, unsigned int truesize); + +void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, + unsigned int truesize); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char *skb_tail_pointer(const struct sk_buff *skb) +{ + return skb->head + skb->tail; +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_tail_pointer(struct sk_buff *skb) +{ + skb->tail = skb->data - skb->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_tail_pointer(struct sk_buff *skb, const int offset) +{ + skb_reset_tail_pointer(skb); + skb->tail += offset; +} +# 2213 "./include/linux/skbuff.h" +void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); +void *skb_put(struct sk_buff *skb, unsigned int len); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *__skb_put(struct sk_buff *skb, unsigned int len) +{ + void *tmp = skb_tail_pointer(skb); + do { if (__builtin_expect(!!(skb_is_nonlinear(skb)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1202)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2218), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1203)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + skb->tail += len; + skb->len += len; + return tmp; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *__skb_put_zero(struct sk_buff *skb, unsigned int len) +{ + void *tmp = __skb_put(skb, len); + + memset(tmp, 0, len); + return tmp; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *__skb_put_data(struct sk_buff *skb, const void *data, + unsigned int len) +{ + void *tmp = __skb_put(skb, len); + + memcpy(tmp, data, len); + return tmp; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_put_u8(struct sk_buff *skb, u8 val) +{ + *(u8 *)__skb_put(skb, 1) = val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_put_zero(struct sk_buff *skb, unsigned int len) +{ + void *tmp = skb_put(skb, len); + + memset(tmp, 0, len); + + return tmp; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_put_data(struct sk_buff *skb, const void *data, + unsigned int len) +{ + void *tmp = skb_put(skb, len); + + memcpy(tmp, data, len); + + return tmp; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_put_u8(struct sk_buff *skb, u8 val) +{ + *(u8 *)skb_put(skb, 1) = val; +} + +void *skb_push(struct sk_buff *skb, unsigned int len); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *__skb_push(struct sk_buff *skb, unsigned int len) +{ + skb->data -= len; + skb->len += len; + return skb->data; +} + +void *skb_pull(struct sk_buff *skb, unsigned int len); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void *__skb_pull(struct sk_buff *skb, unsigned int len) +{ + skb->len -= len; + do { if (__builtin_expect(!!(skb->len < skb->data_len), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1204)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2282), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1205)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + return skb->data += len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_pull_inline(struct sk_buff *skb, unsigned int len) +{ + return __builtin_expect(!!(len > skb->len), 0) ? ((void *)0) : __skb_pull(skb, len); +} + +void *__pskb_pull_tail(struct sk_buff *skb, int delta); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *__pskb_pull(struct sk_buff *skb, unsigned int len) +{ + if (len > skb_headlen(skb) && + !__pskb_pull_tail(skb, len - skb_headlen(skb))) + return ((void *)0); + skb->len -= len; + return skb->data += len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *pskb_pull(struct sk_buff *skb, unsigned int len) +{ + return __builtin_expect(!!(len > skb->len), 0) ? ((void *)0) : __pskb_pull(skb, len); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool pskb_may_pull(struct sk_buff *skb, unsigned int len) +{ + if (__builtin_expect(!!(len <= skb_headlen(skb)), 1)) + return true; + if (__builtin_expect(!!(len > skb->len), 0)) + return false; + return __pskb_pull_tail(skb, len - skb_headlen(skb)) != ((void *)0); +} + +void skb_condense(struct sk_buff *skb); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_headroom(const struct sk_buff *skb) +{ + return skb->data - skb->head; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_tailroom(const struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) ? 
0 : skb->end - skb->tail; +} +# 2347 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_availroom(const struct sk_buff *skb) +{ + if (skb_is_nonlinear(skb)) + return 0; + + return skb->end - skb->tail - skb->reserved_tailroom; +} +# 2363 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reserve(struct sk_buff *skb, int len) +{ + skb->data += len; + skb->tail += len; +} +# 2381 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, + unsigned int needed_tailroom) +{ + do { if (__builtin_expect(!!(skb_is_nonlinear(skb)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1206)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2384), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1207)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + if (mtu < skb_tailroom(skb) - needed_tailroom) + + skb->reserved_tailroom = skb_tailroom(skb) - mtu; + else + + skb->reserved_tailroom = needed_tailroom; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_inner_protocol(struct sk_buff *skb, + __be16 protocol) +{ + skb->inner_protocol = protocol; + skb->inner_protocol_type = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_inner_ipproto(struct sk_buff *skb, + __u8 ipproto) +{ + skb->inner_ipproto = ipproto; + skb->inner_protocol_type = 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_inner_headers(struct sk_buff *skb) +{ + skb->inner_mac_header = skb->mac_header; + skb->inner_network_header = skb->network_header; + skb->inner_transport_header = skb->transport_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_mac_len(struct sk_buff *skb) +{ + skb->mac_len = skb->network_header - skb->mac_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char *skb_inner_transport_header(const struct sk_buff + *skb) +{ + return skb->head + skb->inner_transport_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_inner_transport_offset(const struct sk_buff *skb) +{ + return skb_inner_transport_header(skb) - skb->data; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_inner_transport_header(struct sk_buff *skb) +{ + skb->inner_transport_header = skb->data - 
skb->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_inner_transport_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_inner_transport_header(skb); + skb->inner_transport_header += offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char *skb_inner_network_header(const struct sk_buff *skb) +{ + return skb->head + skb->inner_network_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_inner_network_header(struct sk_buff *skb) +{ + skb->inner_network_header = skb->data - skb->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_inner_network_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_inner_network_header(skb); + skb->inner_network_header += offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char *skb_inner_mac_header(const struct sk_buff *skb) +{ + return skb->head + skb->inner_mac_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_inner_mac_header(struct sk_buff *skb) +{ + skb->inner_mac_header = skb->data - skb->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_inner_mac_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_inner_mac_header(skb); + skb->inner_mac_header += offset; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_transport_header_was_set(const struct sk_buff *skb) +{ + return skb->transport_header != (typeof(skb->transport_header))~0U; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char *skb_transport_header(const struct sk_buff *skb) +{ + return skb->head + skb->transport_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_transport_header(struct sk_buff *skb) +{ + skb->transport_header = skb->data - skb->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_transport_header(struct sk_buff *skb, + const int offset) +{ + skb_reset_transport_header(skb); + skb->transport_header += offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char *skb_network_header(const struct sk_buff *skb) +{ + return skb->head + skb->network_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_network_header(struct sk_buff *skb) +{ + skb->network_header = skb->data - skb->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_network_header(struct sk_buff *skb, const int offset) +{ + skb_reset_network_header(skb); + skb->network_header += offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
unsigned char *skb_mac_header(const struct sk_buff *skb) +{ + return skb->head + skb->mac_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_mac_offset(const struct sk_buff *skb) +{ + return skb_mac_header(skb) - skb->data; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 skb_mac_header_len(const struct sk_buff *skb) +{ + return skb->network_header - skb->mac_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != (typeof(skb->mac_header))~0U; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->data - skb->head; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_mac_header(struct sk_buff *skb, const int offset) +{ + skb_reset_mac_header(skb); + skb->mac_header += offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_pop_mac_header(struct sk_buff *skb) +{ + skb->mac_header = skb->network_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_probe_transport_header(struct sk_buff *skb) +{ + struct flow_keys_basic keys; + + if (skb_transport_header_was_set(skb)) + return; + + if (skb_flow_dissect_flow_keys_basic(((void *)0), skb, &keys, + ((void *)0), 0, 0, 0, 0)) + skb_set_transport_header(skb, keys.control.thoff); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_mac_header_rebuild(struct sk_buff *skb) +{ + if (skb_mac_header_was_set(skb)) { + const unsigned char *old_mac = skb_mac_header(skb); + + skb_set_mac_header(skb, -skb->mac_len); + memmove(skb_mac_header(skb), old_mac, skb->mac_len); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ + return skb->head + skb->csum_start; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_transport_offset(const struct sk_buff *skb) +{ + return skb_transport_header(skb) - skb->data; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 skb_network_header_len(const struct sk_buff *skb) +{ + return skb->transport_header - skb->network_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 skb_inner_network_header_len(const struct sk_buff *skb) +{ + return skb->inner_transport_header - skb->inner_network_header; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_network_offset(const struct sk_buff *skb) +{ + return 
skb_network_header(skb) - skb->data; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_inner_network_offset(const struct sk_buff *skb) +{ + return skb_inner_network_header(skb) - skb->data; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) +{ + return pskb_may_pull(skb, skb_network_offset(skb) + len); +} +# 2662 "./include/linux/skbuff.h" +int ___pskb_trim(struct sk_buff *skb, unsigned int len); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_set_length(struct sk_buff *skb, unsigned int len) +{ + if (({ int __ret_warn_on = !!(skb_is_nonlinear(skb)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1208)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2666), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1209)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1210)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return; + skb->len = len; + skb_set_tail_pointer(skb, len); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_trim(struct sk_buff *skb, unsigned int len) +{ + __skb_set_length(skb, len); +} + +void skb_trim(struct sk_buff *skb, unsigned int len); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __pskb_trim(struct sk_buff *skb, unsigned int len) +{ + if (skb->data_len) + return ___pskb_trim(skb, len); + __skb_trim(skb, len); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pskb_trim(struct sk_buff *skb, unsigned int len) +{ + return (len < skb->len) ? 
__pskb_trim(skb, len) : 0; +} +# 2701 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void pskb_trim_unique(struct sk_buff *skb, unsigned int len) +{ + int err = pskb_trim(skb, len); + do { if (__builtin_expect(!!(err), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1211)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2704), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1212)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __skb_grow(struct sk_buff *skb, unsigned int len) +{ + unsigned int diff = len - skb->len; + + if (skb_tailroom(skb) < diff) { + int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), + ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); + if (ret) + return ret; + } + __skb_set_length(skb, len); + return 0; +} +# 2729 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_orphan(struct sk_buff *skb) +{ + if (skb->destructor) { + skb->destructor(skb); + skb->destructor = ((void *)0); + skb->sk = ((void *)0); + } else { + do { if (__builtin_expect(!!(skb->sk), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1213)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (2736), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1214)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + } +} +# 2749 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) +{ + if (__builtin_expect(!!(!skb_zcopy(skb)), 1)) + return 0; + if (!skb_zcopy_is_nouarg(skb) && + ((struct ubuf_info *)(((struct skb_shared_info *)(skb_end_pointer(skb)))->destructor_arg))->callback == sock_zerocopy_callback) + return 0; + return skb_copy_ubufs(skb, gfp_mask); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) +{ + if (__builtin_expect(!!(!skb_zcopy(skb)), 1)) + return 0; + return skb_copy_ubufs(skb, gfp_mask); +} +# 2775 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
void __skb_queue_purge(struct sk_buff_head *list) +{ + struct sk_buff *skb; + while ((skb = __skb_dequeue(list)) != ((void *)0)) + kfree_skb(skb); +} +void skb_queue_purge(struct sk_buff_head *list); + +unsigned int skb_rbtree_purge(struct rb_root *root); + +void *netdev_alloc_frag(unsigned int fragsz); + +struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, + gfp_t gfp_mask); +# 2803 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *netdev_alloc_skb(struct net_device *dev, + unsigned int length) +{ + return __netdev_alloc_skb(dev, length, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *__dev_alloc_skb(unsigned int length, + gfp_t gfp_mask) +{ + return __netdev_alloc_skb(((void *)0), length, gfp_mask); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *dev_alloc_skb(unsigned int length) +{ + return netdev_alloc_skb(((void *)0), length); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length, gfp_t gfp) +{ + struct sk_buff *skb = __netdev_alloc_skb(dev, length + 0, gfp); + + if (0 && skb) + skb_reserve(skb, 0); + return skb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + return __netdev_alloc_skb_ip_align(dev, length, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_free_frag(void *addr) +{ + page_frag_free(addr); +} + +void *napi_alloc_frag(unsigned int fragsz); +struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, + unsigned int length, gfp_t gfp_mask); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + return __napi_alloc_skb(napi, length, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); +} +void napi_consume_skb(struct sk_buff *skb, int budget); + +void __kfree_skb_flush(void); +void __kfree_skb_defer(struct sk_buff *skb); +# 2866 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *__dev_alloc_pages(gfp_t gfp_mask, + unsigned int order) +{ +# 2877 "./include/linux/skbuff.h" + gfp_mask |= (( gfp_t)0x40000u) | (( gfp_t)0x20000u); + + return alloc_pages_node((-1), gfp_mask, order); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *dev_alloc_pages(unsigned int order) +{ + return __dev_alloc_pages(((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u)) | (( gfp_t)0x2000u), order); +} +# 2895 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *__dev_alloc_page(gfp_t gfp_mask) +{ + return __dev_alloc_pages(gfp_mask, 0); +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *dev_alloc_page(void) +{ + return dev_alloc_pages(0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_propagate_pfmemalloc(struct page *page, + struct sk_buff *skb) +{ + if (page_is_pfmemalloc(page)) + skb->pfmemalloc = true; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->bv_offset; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->bv_offset += delta; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) +{ + frag->bv_offset = offset; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_off_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) +{ + fragto->bv_offset = fragfrom->bv_offset; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct page *skb_frag_page(const skb_frag_t *frag) +{ + return frag->bv_page; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_frag_ref(skb_frag_t *frag) +{ + get_page(skb_frag_page(frag)); +} +# 2986 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_ref(struct sk_buff *skb, int f) +{ + __skb_frag_ref(&((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[f]); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +# 3009 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_unref(struct sk_buff *skb, int f) +{ + __skb_frag_unref(&((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[f]); +} +# 3021 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_frag_address(const skb_frag_t *frag) +{ + return lowmem_page_address(skb_frag_page(frag)) + skb_frag_off(frag); +} +# 3033 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_frag_address_safe(const skb_frag_t *frag) +{ + void *ptr = lowmem_page_address(skb_frag_page(frag)); + if (__builtin_expect(!!(!ptr), 0)) + return ((void *)0); + + return ptr + skb_frag_off(frag); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_page_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) +{ + fragto->bv_page = fragfrom->bv_page; +} +# 3060 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
__skb_frag_set_page(skb_frag_t *frag, struct page *page) +{ + frag->bv_page = page; +} +# 3073 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_set_page(struct sk_buff *skb, int f, + struct page *page) +{ + __skb_frag_set_page(&((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[f], page); +} + +bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); +# 3092 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) dma_addr_t skb_frag_dma_map(struct device *dev, + const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page_attrs(dev, skb_frag_page(frag), skb_frag_off(frag) + offset, size, dir, 0) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *pskb_copy(struct sk_buff *skb, + gfp_t gfp_mask) +{ + return __pskb_copy(skb, skb_headroom(skb), gfp_mask); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, + gfp_t gfp_mask) +{ + return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); +} +# 3123 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_clone_writable(const struct sk_buff *skb, unsigned int len) +{ + return !skb_header_cloned(skb) && + skb_headroom(skb) + len <= skb->hdr_len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && + pskb_expand_head(skb, 0, 0, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __skb_cow(struct sk_buff *skb, unsigned int headroom, + int cloned) +{ + int delta = 0; + + if (headroom > skb_headroom(skb)) + delta = headroom - skb_headroom(skb); + + if (delta || cloned) + return pskb_expand_head(skb, ((((delta)) + ((typeof((delta)))((__builtin_choose_expr(((!!(sizeof((typeof(32) *)1 == (typeof((1 << (6))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(32) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((1 << (6))) * 0l)) : (int *)8))))), ((32) > ((1 << (6))) ? (32) : ((1 << (6)))), ({ typeof(32) __UNIQUE_ID___x1215 = (32); typeof((1 << (6))) __UNIQUE_ID___y1216 = ((1 << (6))); ((__UNIQUE_ID___x1215) > (__UNIQUE_ID___y1216) ? (__UNIQUE_ID___x1215) : (__UNIQUE_ID___y1216)); })))) - 1)) & ~((typeof((delta)))((__builtin_choose_expr(((!!(sizeof((typeof(32) *)1 == (typeof((1 << (6))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(32) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((1 << (6))) * 0l)) : (int *)8))))), ((32) > ((1 << (6))) ? (32) : ((1 << (6)))), ({ typeof(32) __UNIQUE_ID___x1215 = (32); typeof((1 << (6))) __UNIQUE_ID___y1216 = ((1 << (6))); ((__UNIQUE_ID___x1215) > (__UNIQUE_ID___y1216) ? 
(__UNIQUE_ID___x1215) : (__UNIQUE_ID___y1216)); })))) - 1)), 0, + ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); + return 0; +} +# 3162 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_cow(struct sk_buff *skb, unsigned int headroom) +{ + return __skb_cow(skb, headroom, skb_cloned(skb)); +} +# 3177 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + return __skb_cow(skb, headroom, skb_header_cloned(skb)); +} +# 3192 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if (__builtin_expect(!!(size >= len), 1)) + return 0; + return skb_pad(skb, len - size); +} +# 3211 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __skb_put_padto(struct sk_buff *skb, unsigned int len, + bool free_on_error) +{ + unsigned int size = skb->len; + + if (__builtin_expect(!!(size < len), 0)) { + len -= size; + if (__skb_pad(skb, len, free_on_error)) + return -12; + __skb_put(skb, len); + } + return 0; +} +# 3235 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + return __skb_put_padto(skb, len, true); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_add_data(struct sk_buff *skb, + struct iov_iter *from, int copy) +{ + const int off = skb->len; + + if (skb->ip_summed == 0) { + __wsum csum = 0; + if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy, + &csum, from)) { + skb->csum = csum_block_add(skb->csum, csum, off); + return 0; + } + } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) + return 0; + + __skb_trim(skb, off); + return -14; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_can_coalesce(struct sk_buff *skb, int i, + const struct page *page, int off) +{ + if (skb_zcopy(skb)) + return false; + if (i) { + const skb_frag_t *frag = &((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i - 1]; + + return page == skb_frag_page(frag) && + off == skb_frag_off(frag) + skb_frag_size(frag); + } + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __skb_linearize(struct sk_buff *skb) +{ + return __pskb_pull_tail(skb, skb->data_len) ? 0 : -12; +} +# 3285 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_linearize(struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) ? 
__skb_linearize(skb) : 0; +} +# 3297 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_has_shared_frag(const struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) && + ((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags & SKBTX_SHARED_FRAG; +} +# 3310 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_linearize_cow(struct sk_buff *skb) +{ + return skb_is_nonlinear(skb) || skb_cloned(skb) ? + __skb_linearize(skb) : 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, + unsigned int off) +{ + if (skb->ip_summed == 2) + skb->csum = csum_block_sub(skb->csum, + csum_partial(start, len, 0), off); + else if (skb->ip_summed == 3 && + skb_checksum_start_offset(skb) < 0) + skb->ip_summed = 0; +} +# 3338 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_postpull_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + __skb_postpull_rcsum(skb, start, len, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, + unsigned int off) +{ + if (skb->ip_summed == 2) + skb->csum = csum_block_add(skb->csum, + csum_partial(start, len, 0), off); +} +# 3362 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_postpush_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + __skb_postpush_rcsum(skb, start, len, 0); +} + +void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +# 3381 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_push_rcsum(struct sk_buff *skb, unsigned int len) +{ + skb_push(skb, len); + skb_postpush_rcsum(skb, skb->data, len); + return skb->data; +} + +int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); +# 3399 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (__builtin_expect(!!(len >= skb->len), 1)) + return 0; + return pskb_trim_rcsum_slow(skb, len); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == 2) + skb->ip_summed = 0; + __skb_trim(skb, len); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == 2) + skb->ip_summed = 0; + return __skb_grow(skb, len); +} +# 3473 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_has_frag_list(const struct sk_buff *skb) +{ + return ((struct skb_shared_info 
*)(skb_end_pointer(skb)))->frag_list != ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_frag_list_init(struct sk_buff *skb) +{ + ((struct skb_shared_info *)(skb_end_pointer(skb)))->frag_list = ((void *)0); +} + + + + + +int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue, + int *err, long *timeo_p, + const struct sk_buff *skb); +struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, + struct sk_buff_head *queue, + unsigned int flags, + int *off, int *err, + struct sk_buff **last); +struct sk_buff *__skb_try_recv_datagram(struct sock *sk, + struct sk_buff_head *queue, + unsigned int flags, int *off, int *err, + struct sk_buff **last); +struct sk_buff *__skb_recv_datagram(struct sock *sk, + struct sk_buff_head *sk_queue, + unsigned int flags, int *off, int *err); +struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, + int *err); +__poll_t datagram_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait); +int skb_copy_datagram_iter(const struct sk_buff *from, int offset, + struct iov_iter *to, int size); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_copy_datagram_msg(const struct sk_buff *from, int offset, + struct msghdr *msg, int size) +{ + return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); +} +int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, + struct msghdr *msg); +int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, + struct iov_iter *to, int len, + struct ahash_request *hash); +int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, + struct iov_iter *from, int len); +int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); +void skb_free_datagram(struct sock *sk, struct sk_buff *skb); +void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_free_datagram_locked(struct sock *sk, + struct sk_buff *skb) +{ + __skb_free_datagram_locked(sk, skb, 0); +} +int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); +int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); +int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); +__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, + int len, __wsum csum); +int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, + struct pipe_inode_info *pipe, unsigned int len, + unsigned int flags); +int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, + int len); +void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); +unsigned int skb_zerocopy_headlen(const struct sk_buff *from); +int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, + int len, int hlen); +void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); +int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); +void skb_scrub_packet(struct sk_buff *skb, bool xnet); +bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); +bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); +struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); +struct sk_buff *skb_segment_list(struct sk_buff *skb, 
netdev_features_t features, + unsigned int offset); +struct sk_buff *skb_vlan_untag(struct sk_buff *skb); +int skb_ensure_writable(struct sk_buff *skb, int write_len); +int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci); +int skb_vlan_pop(struct sk_buff *skb); +int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); +int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, + int mac_len, bool ethernet); +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, + bool ethernet); +int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse); +int skb_mpls_dec_ttl(struct sk_buff *skb); +struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, + gfp_t gfp); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int memcpy_from_msg(void *data, struct msghdr *msg, int len) +{ + return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -14; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int memcpy_to_msg(struct msghdr *msg, void *data, int len) +{ + return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -14; +} + +struct skb_checksum_ops { + __wsum (*update)(const void *mem, int len, __wsum wsum); + __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); +}; + +extern const struct skb_checksum_ops *crc32c_csum_stub __attribute__((__section__(".data..read_mostly"))); + +__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, + __wsum csum, const struct skb_checksum_ops *ops); +__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, + __wsum csum); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void * __attribute__((__warn_unused_result__)) +__skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *data, int hlen, void *buffer) +{ + if (hlen - offset >= len) + return data + offset; + + if (!skb || + skb_copy_bits(skb, offset, buffer, len) < 0) + return ((void *)0); + + return buffer; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void * __attribute__((__warn_unused_result__)) +skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) +{ + return __skb_header_pointer(skb, offset, len, skb->data, + skb_headlen(skb), buffer); +} +# 3617 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_needs_linearize(struct sk_buff *skb, + netdev_features_t features) +{ + return skb_is_nonlinear(skb) && + ((skb_has_frag_list(skb) && !(features & ((netdev_features_t)1 << (NETIF_F_FRAGLIST_BIT)))) || + (((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags && !(features & ((netdev_features_t)1 << (NETIF_F_SG_BIT))))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_copy_from_linear_data(const struct sk_buff *skb, + void *to, + const unsigned int len) +{ + memcpy(to, skb->data, len); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_copy_from_linear_data_offset(const struct sk_buff *skb, + const int offset, void *to, + const unsigned int len) +{ + memcpy(to, skb->data + offset, len); +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_copy_to_linear_data(struct sk_buff *skb, + const void *from, + const unsigned int len) +{ + memcpy(skb->data, from, len); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_copy_to_linear_data_offset(struct sk_buff *skb, + const int offset, + const void *from, + const unsigned int len) +{ + memcpy(skb->data + offset, from, len); +} + +void skb_init(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t skb_get_ktime(const struct sk_buff *skb) +{ + return skb->tstamp; +} +# 3670 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_get_timestamp(const struct sk_buff *skb, + struct __kernel_old_timeval *stamp) +{ + *stamp = ns_to_kernel_old_timeval(skb->tstamp); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_get_new_timestamp(const struct sk_buff *skb, + struct __kernel_sock_timeval *stamp) +{ + struct timespec64 ts = ns_to_timespec64((skb->tstamp)); + + stamp->tv_sec = ts.tv_sec; + stamp->tv_usec = ts.tv_nsec / 1000; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_get_timestampns(const struct sk_buff *skb, + struct __kernel_old_timespec *stamp) +{ + struct timespec64 ts = ns_to_timespec64((skb->tstamp)); + + stamp->tv_sec = ts.tv_sec; + stamp->tv_nsec = ts.tv_nsec; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_get_new_timestampns(const struct sk_buff *skb, + struct __kernel_timespec *stamp) +{ + struct timespec64 ts = ns_to_timespec64((skb->tstamp)); + + stamp->tv_sec = ts.tv_sec; + stamp->tv_nsec = ts.tv_nsec; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __net_timestamp(struct sk_buff *skb) +{ + skb->tstamp = ktime_get_real(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t net_timedelta(ktime_t t) +{ + return ((ktime_get_real()) - (t)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t net_invalid_timestamp(void) +{ + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u8 skb_metadata_len(const struct sk_buff *skb) +{ + return ((struct skb_shared_info *)(skb_end_pointer(skb)))->meta_len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_metadata_end(const struct sk_buff *skb) +{ + return skb_mac_header(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __skb_metadata_differs(const struct sk_buff *skb_a, + const struct sk_buff *skb_b, + u8 meta_len) +{ + const void *a = skb_metadata_end(skb_a); + const void *b = skb_metadata_end(skb_b); + + + u64 diffs = 0; + + switch (meta_len) { + + + case 32: diffs |= (*(u64 *)(a -= sizeof(u64))) ^ (*(u64 *)(b -= sizeof(u64))); + + case 24: diffs |= (*(u64 *)(a -= sizeof(u64))) ^ (*(u64 *)(b 
-= sizeof(u64))); + + case 16: diffs |= (*(u64 *)(a -= sizeof(u64))) ^ (*(u64 *)(b -= sizeof(u64))); + + case 8: diffs |= (*(u64 *)(a -= sizeof(u64))) ^ (*(u64 *)(b -= sizeof(u64))); + break; + case 28: diffs |= (*(u64 *)(a -= sizeof(u64))) ^ (*(u64 *)(b -= sizeof(u64))); + + case 20: diffs |= (*(u64 *)(a -= sizeof(u64))) ^ (*(u64 *)(b -= sizeof(u64))); + + case 12: diffs |= (*(u64 *)(a -= sizeof(u64))) ^ (*(u64 *)(b -= sizeof(u64))); + + case 4: diffs |= (*(u32 *)(a -= sizeof(u32))) ^ (*(u32 *)(b -= sizeof(u32))); + break; + } + return diffs; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_metadata_differs(const struct sk_buff *skb_a, + const struct sk_buff *skb_b) +{ + u8 len_a = skb_metadata_len(skb_a); + u8 len_b = skb_metadata_len(skb_b); + + if (!(len_a | len_b)) + return false; + + return len_a != len_b ? + true : __skb_metadata_differs(skb_a, skb_b, len_a); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_metadata_set(struct sk_buff *skb, u8 meta_len) +{ + ((struct skb_shared_info *)(skb_end_pointer(skb)))->meta_len = meta_len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_metadata_clear(struct sk_buff *skb) +{ + skb_metadata_set(skb, 0); +} + +struct sk_buff *skb_clone_sk(struct sk_buff *skb); + + + +void skb_clone_tx_timestamp(struct sk_buff *skb); +bool skb_defer_rx_timestamp(struct sk_buff *skb); +# 3819 "./include/linux/skbuff.h" +void skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); + +void __skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps, + struct sock *sk, int tstype); +# 3837 "./include/linux/skbuff.h" +void skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps); +# 3852 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_tx_timestamp(struct sk_buff *skb) +{ + skb_clone_tx_timestamp(skb); + if (((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags & SKBTX_SW_TSTAMP) + skb_tstamp_tx(skb, ((void *)0)); +} +# 3866 "./include/linux/skbuff.h" +void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); + +__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); +__sum16 __skb_checksum_complete(struct sk_buff *skb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_csum_unnecessary(const struct sk_buff *skb) +{ + return ((skb->ip_summed == 1) || + skb->csum_valid || + (skb->ip_summed == 3 && + skb_checksum_start_offset(skb) >= 0)); +} +# 3895 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 skb_checksum_complete(struct sk_buff *skb) +{ + return skb_csum_unnecessary(skb) ? 
+ 0 : __skb_checksum_complete(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_decr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == 1) { + if (skb->csum_level == 0) + skb->ip_summed = 0; + else + skb->csum_level--; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_incr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == 1) { + if (skb->csum_level < 3) + skb->csum_level++; + } else if (skb->ip_summed == 0) { + skb->ip_summed = 1; + skb->csum_level = 0; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_reset_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == 1) { + skb->ip_summed = 0; + skb->csum_level = 0; + } +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __skb_checksum_validate_needed(struct sk_buff *skb, + bool zero_okay, + __sum16 check) +{ + if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { + skb->csum_valid = 1; + __skb_decr_checksum_unnecessary(skb); + return false; + } + + return true; +} +# 3959 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_checksum_complete_unset(struct sk_buff *skb) +{ + if (skb->ip_summed == 2) + skb->ip_summed = 0; +} +# 3974 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, + bool complete, + __wsum psum) +{ + if (skb->ip_summed == 2) { + if (!csum_fold(csum_add(psum, skb->csum))) { + skb->csum_valid = 1; + return 0; + } + } + + skb->csum = psum; + + if (complete || skb->len <= 76) { + __sum16 csum; + + csum = __skb_checksum_complete(skb); + skb->csum_valid = !csum; + return csum; + } + + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum null_compute_pseudo(struct sk_buff *skb, int proto) +{ + return 0; +} +# 4040 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __skb_checksum_convert_check(struct sk_buff *skb) +{ + return (skb->ip_summed == 0 && skb->csum_valid); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo) +{ + skb->csum = ~pseudo; + skb->ip_summed = 2; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, + u16 start, u16 offset) +{ + skb->ip_summed = 3; + skb->csum_start = ((unsigned char *)ptr + start) - skb->head; + skb->csum_offset = offset - start; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_remcsum_process(struct sk_buff *skb, void *ptr, + int start, int offset, bool nopartial) +{ + __wsum delta; + + if (!nopartial) { + skb_remcsum_adjust_partial(skb, ptr, start, offset); + return; + } + + if (__builtin_expect(!!(skb->ip_summed != 2), 0)) { + 
__skb_checksum_complete(skb); + skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); + } + + delta = remcsum_adjust(ptr, skb->csum, start, offset); + + + skb->csum = csum_add(skb->csum, delta); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nf_conntrack *skb_nfct(const struct sk_buff *skb) +{ + + return (void *)(skb->_nfct & ~(7UL)); + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long skb_get_nfct(const struct sk_buff *skb) +{ + + return skb->_nfct; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) +{ + + skb->_nfct = nfct; + +} + + +enum skb_ext_id { + + SKB_EXT_BRIDGE_NF, + + + SKB_EXT_SEC_PATH, + + + TC_SKB_EXT, + + + SKB_EXT_MPTCP, + + SKB_EXT_NUM, +}; +# 4143 "./include/linux/skbuff.h" +struct skb_ext { + refcount_t refcnt; + u8 offset[SKB_EXT_NUM]; + u8 chunks; + char data[] __attribute__((__aligned__(8))); +}; + +struct skb_ext *__skb_ext_alloc(gfp_t flags); +void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, + struct skb_ext *ext); +void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); +void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id); +void __skb_ext_put(struct skb_ext *ext); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_ext_put(struct sk_buff *skb) +{ + if (skb->active_extensions) + __skb_ext_put(skb->extensions); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_ext_copy(struct sk_buff *dst, + const struct sk_buff *src) +{ + dst->active_extensions = src->active_extensions; + + if (src->active_extensions) { + struct skb_ext *ext = src->extensions; + + refcount_inc(&ext->refcnt); + dst->extensions = ext; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) +{ + skb_ext_put(dst); + __skb_ext_copy(dst, src); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i) +{ + return !!ext->offset[i]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) +{ + return skb->active_extensions & (1 << id); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) +{ + if (skb_ext_exist(skb, id)) + __skb_ext_del(skb, id); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) +{ + if (skb_ext_exist(skb, id)) { + struct skb_ext *ext = skb->extensions; + + return (void *)ext + (ext->offset[id] << 3); + } + + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_ext_reset(struct sk_buff *skb) +{ + if (__builtin_expect(!!(skb->active_extensions), 0)) { + 
__skb_ext_put(skb->extensions); + skb->active_extensions = 0; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_has_extensions(struct sk_buff *skb) +{ + return __builtin_expect(!!(skb->active_extensions), 0); +} +# 4230 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nf_reset_ct(struct sk_buff *skb) +{ + + nf_conntrack_put(skb_nfct(skb)); + skb->_nfct = 0; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nf_reset_trace(struct sk_buff *skb) +{ + + skb->nf_trace = 0; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ipvs_reset(struct sk_buff *skb) +{ + + skb->ipvs_property = 0; + +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, + bool copy) +{ + + dst->_nfct = src->_nfct; + nf_conntrack_get(skb_nfct(src)); + + + if (copy) + dst->nf_trace = src->nf_trace; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ + + nf_conntrack_put(skb_nfct(dst)); + + __nf_copy(dst, src, true); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) +{ + to->secmark = from->secmark; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_init_secmark(struct sk_buff *skb) +{ + skb->secmark = 0; +} +# 4292 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int secpath_exists(const struct sk_buff *skb) +{ + + return skb_ext_exist(skb, SKB_EXT_SEC_PATH); + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_irq_freeable(const struct sk_buff *skb) +{ + return !skb->destructor && + !secpath_exists(skb) && + !skb_nfct(skb) && + !skb->_skb_refdst && + !skb_has_frag_list(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) +{ + skb->queue_mapping = queue_mapping; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 skb_get_queue_mapping(const struct sk_buff *skb) +{ + return skb->queue_mapping; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) +{ + to->queue_mapping = from->queue_mapping; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) +{ + skb->queue_mapping = rx_queue + 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 skb_get_rx_queue(const struct sk_buff *skb) +{ + return skb->queue_mapping - 1; 
+} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_rx_queue_recorded(const struct sk_buff *skb) +{ + return skb->queue_mapping != 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val) +{ + skb->dst_pending_confirm = val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_get_dst_pending_confirm(const struct sk_buff *skb) +{ + return skb->dst_pending_confirm != 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sec_path *skb_sec_path(const struct sk_buff *skb) +{ + + return skb_ext_find(skb, SKB_EXT_SEC_PATH); + + + +} + + + + + + + +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_tnl_header_len(const struct sk_buff *inner_skb) +{ + return (skb_mac_header(inner_skb) - inner_skb->head) - + ((struct skb_gso_cb *)((inner_skb)->cb + 32))->mac_offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int gso_pskb_expand_head(struct sk_buff *skb, int extra) +{ + int new_headroom, headroom; + int ret; + + headroom = skb_headroom(skb); + ret = pskb_expand_head(skb, extra, 0, ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); + if (ret) + return ret; + + new_headroom = skb_headroom(skb); + ((struct skb_gso_cb *)((skb)->cb + 32))->mac_offset += (new_headroom - headroom); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void gso_reset_checksum(struct sk_buff *skb, __wsum res) +{ + + if (skb->remcsum_offload) + return; + + ((struct skb_gso_cb *)((skb)->cb + 32))->csum = res; + ((struct skb_gso_cb *)((skb)->cb + 32))->csum_start = skb_checksum_start(skb) - skb->head; +} +# 4416 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) +{ + unsigned char *csum_start = skb_transport_header(skb); + int plen = (skb->head + ((struct skb_gso_cb *)((skb)->cb + 32))->csum_start) - csum_start; + __wsum partial = ((struct skb_gso_cb *)((skb)->cb + 32))->csum; + + ((struct skb_gso_cb *)((skb)->cb + 32))->csum = res; + ((struct skb_gso_cb *)((skb)->cb + 32))->csum_start = csum_start - skb->head; + + return csum_fold(csum_partial(csum_start, plen, partial)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_is_gso(const struct sk_buff *skb) +{ + return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_size; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_is_gso_v6(const struct sk_buff *skb) +{ + return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & SKB_GSO_TCPV6; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_is_gso_sctp(const struct sk_buff *skb) +{ + return ((struct skb_shared_info 
*)(skb_end_pointer(skb)))->gso_type & SKB_GSO_SCTP; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_is_gso_tcp(const struct sk_buff *skb) +{ + return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gso_reset(struct sk_buff *skb) +{ + ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_size = 0; + ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs = 0; + ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_increase_gso_size(struct skb_shared_info *shinfo, + u16 increment) +{ + if (({ int __ret_warn_on = !!(shinfo->gso_size == 0xFFFF); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1217)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (4461), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1218)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1219)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return; + shinfo->gso_size += increment; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_decrease_gso_size(struct skb_shared_info *shinfo, + u16 decrement) +{ + if (({ int __ret_warn_on = !!(shinfo->gso_size == 0xFFFF); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1220)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/skbuff.h"), "i" (4469), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1221)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1222)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return; + shinfo->gso_size -= decrement; +} + +void __skb_warn_lro_forwarding(const struct sk_buff *skb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_warn_if_lro(const struct sk_buff *skb) +{ + + + const struct skb_shared_info *shinfo = ((struct skb_shared_info *)(skb_end_pointer(skb))); + + if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && + 
__builtin_expect(!!(shinfo->gso_type == 0), 0)) { + __skb_warn_lro_forwarding(skb); + return true; + } + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_forward_csum(struct sk_buff *skb) +{ + + if (skb->ip_summed == 2) + skb->ip_summed = 0; +} +# 4505 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_checksum_none_assert(const struct sk_buff *skb) +{ + + + +} + +bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); + +int skb_checksum_setup(struct sk_buff *skb, bool recalculate); +struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, + unsigned int transport_len, + __sum16(*skb_chkf)(struct sk_buff *skb)); +# 4528 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_head_is_locked(const struct sk_buff *skb) +{ + return !skb->head_frag || skb_cloned(skb); +} +# 4542 "./include/linux/skbuff.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __wsum lco_csum(struct sk_buff *skb) +{ + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *l4_hdr = skb_transport_header(skb); + __wsum partial; + + + partial = ~csum_unfold(*( __sum16 *)(csum_start + + skb->csum_offset)); + + + + + return csum_partial(l4_hdr, csum_start - l4_hdr, partial); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_is_redirected(const struct sk_buff *skb) +{ + + return skb->redirected; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_set_redirected(struct sk_buff *skb, bool from_ingress) +{ + + skb->redirected = 1; + skb->from_ingress = from_ingress; + if (skb->from_ingress) + skb->tstamp = 0; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_reset_redirect(struct sk_buff *skb) +{ + + skb->redirected = 0; + +} +# 20 "./include/linux/if_ether.h" 2 + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct ethhdr *eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb_mac_header(skb); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct ethhdr *skb_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb->data; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb_inner_mac_header(skb); +} + +int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); + +extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); +# 20 "./include/uapi/linux/ethtool.h" 2 +# 96 "./include/uapi/linux/ethtool.h" +struct ethtool_cmd { + __u32 cmd; + __u32 supported; + __u32 advertising; + __u16 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 transceiver; + __u8 autoneg; + __u8 mdio_support; + __u32 maxtxpkt; + __u32 maxrxpkt; + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __u32 lp_advertising; + __u32 reserved[2]; +}; + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} +# 177 "./include/uapi/linux/ethtool.h" +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char erom_version[32]; + char reserved2[12]; + __u32 n_priv_flags; + __u32 n_stats; + __u32 testinfo_len; + __u32 eedump_len; + __u32 regdump_len; +}; +# 203 "./include/uapi/linux/ethtool.h" +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[6]; +}; + + +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + + + + +enum tunable_id { + ETHTOOL_ID_UNSPEC, + ETHTOOL_RX_COPYBREAK, + ETHTOOL_TX_COPYBREAK, + ETHTOOL_PFC_PREVENTION_TOUT, + + + + + __ETHTOOL_TUNABLE_COUNT, +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC, + ETHTOOL_TUNABLE_U8, + ETHTOOL_TUNABLE_U16, + ETHTOOL_TUNABLE_U32, + ETHTOOL_TUNABLE_U64, + ETHTOOL_TUNABLE_STRING, + ETHTOOL_TUNABLE_S8, + ETHTOOL_TUNABLE_S16, + ETHTOOL_TUNABLE_S32, + ETHTOOL_TUNABLE_S64, +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; +# 283 "./include/uapi/linux/ethtool.h" +enum phy_tunable_id { + ETHTOOL_PHY_ID_UNSPEC, + ETHTOOL_PHY_DOWNSHIFT, + ETHTOOL_PHY_FAST_LINK_DOWN, + ETHTOOL_PHY_EDPD, + + + + + __ETHTOOL_PHY_TUNABLE_COUNT, +}; +# 310 "./include/uapi/linux/ethtool.h" +struct ethtool_regs { + __u32 cmd; + __u32 version; + __u32 len; + __u8 data[0]; +}; +# 335 "./include/uapi/linux/ethtool.h" +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; + __u32 len; + __u8 data[0]; +}; +# 360 "./include/uapi/linux/ethtool.h" +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; +# 382 "./include/uapi/linux/ethtool.h" +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; +# 462 "./include/uapi/linux/ethtool.h" +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; +# 513 "./include/uapi/linux/ethtool.h" +struct ethtool_ringparam { + __u32 cmd; + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; +# 541 "./include/uapi/linux/ethtool.h" +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + 
__u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; +# 575 "./include/uapi/linux/ethtool.h" +struct ethtool_pauseparam { + __u32 cmd; + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; +# 603 "./include/uapi/linux/ethtool.h" +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, + ETH_SS_PRIV_FLAGS, + ETH_SS_NTUPLE_FILTERS, + ETH_SS_FEATURES, + ETH_SS_RSS_HASH_FUNCS, + ETH_SS_TUNABLES, + ETH_SS_PHY_STATS, + ETH_SS_PHY_TUNABLES, + ETH_SS_LINK_MODES, + ETH_SS_MSG_CLASSES, + ETH_SS_WOL_MODES, + ETH_SS_SOF_TIMESTAMPING, + ETH_SS_TS_TX_TYPES, + ETH_SS_TS_RX_FILTERS, + + + ETH_SS_COUNT +}; +# 636 "./include/uapi/linux/ethtool.h" +struct ethtool_gstrings { + __u32 cmd; + __u32 string_set; + __u32 len; + __u8 data[0]; +}; +# 660 "./include/uapi/linux/ethtool.h" +struct ethtool_sset_info { + __u32 cmd; + __u32 reserved; + __u64 sset_mask; + __u32 data[0]; +}; +# 677 "./include/uapi/linux/ethtool.h" +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), + ETH_TEST_FL_EXTERNAL_LB = (1 << 2), + ETH_TEST_FL_EXTERNAL_LB_DONE = (1 << 3), +}; +# 698 "./include/uapi/linux/ethtool.h" +struct ethtool_test { + __u32 cmd; + __u32 flags; + __u32 reserved; + __u32 len; + __u64 data[0]; +}; +# 717 "./include/uapi/linux/ethtool.h" +struct ethtool_stats { + __u32 cmd; + __u32 n_stats; + __u64 data[0]; +}; +# 734 "./include/uapi/linux/ethtool.h" +struct ethtool_perm_addr { + __u32 cmd; + __u32 size; + __u8 data[0]; +}; +# 749 "./include/uapi/linux/ethtool.h" +enum ethtool_flags { + ETH_FLAG_TXVLAN = (1 << 7), + ETH_FLAG_RXVLAN = (1 << 8), + ETH_FLAG_LRO = (1 << 15), + ETH_FLAG_NTUPLE = (1 << 27), + ETH_FLAG_RXHASH = (1 << 28), +}; +# 773 "./include/uapi/linux/ethtool.h" +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; +# 790 "./include/uapi/linux/ethtool.h" +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; +# 808 "./include/uapi/linux/ethtool.h" +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; +# 827 "./include/uapi/linux/ethtool.h" +struct ethtool_tcpip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be16 psrc; + __be16 pdst; + __u8 tclass; +}; +# 844 "./include/uapi/linux/ethtool.h" +struct ethtool_ah_espip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 spi; + __u8 tclass; +}; +# 859 "./include/uapi/linux/ethtool.h" +struct ethtool_usrip6_spec { + __be32 ip6src[4]; + __be32 ip6dst[4]; + __be32 l4_4_bytes; + __u8 tclass; + __u8 l4_proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_tcpip6_spec tcp_ip6_spec; + struct ethtool_tcpip6_spec udp_ip6_spec; + struct ethtool_tcpip6_spec sctp_ip6_spec; + struct ethtool_ah_espip6_spec ah_ip6_spec; + struct ethtool_ah_espip6_spec esp_ip6_spec; + struct ethtool_usrip6_spec usr_ip6_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; +# 895 "./include/uapi/linux/ethtool.h" +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[6]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +# 920 "./include/uapi/linux/ethtool.h" +struct ethtool_rx_flow_spec { 
+ __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +# 944 "./include/uapi/linux/ethtool.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return 0x00000000FFFFFFFFLL & ring_cookie; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (0x000000FF00000000LL & ring_cookie) >> + 32; +} +# 1016 "./include/uapi/linux/ethtool.h" +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + union { + __u32 rule_cnt; + __u32 rss_context; + }; + __u32 rule_locs[0]; +}; +# 1042 "./include/uapi/linux/ethtool.h" +struct ethtool_rxfh_indir { + __u32 cmd; + __u32 size; + __u32 ring_index[0]; +}; +# 1077 "./include/uapi/linux/ethtool.h" +struct ethtool_rxfh { + __u32 cmd; + __u32 rss_context; + __u32 indir_size; + __u32 key_size; + __u8 hfunc; + __u8 rsvd8[3]; + __u32 rsvd32; + __u32 rss_config[0]; +}; +# 1106 "./include/uapi/linux/ethtool.h" +struct ethtool_rx_ntuple_flow_spec { + __u32 flow_type; + union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + __u8 hdata[72]; + } h_u, m_u; + + __u16 vlan_tag; + __u16 vlan_tag_mask; + __u64 data; + __u64 data_mask; + + __s32 action; + + +}; + + + + + + +struct ethtool_rx_ntuple { + __u32 cmd; + struct ethtool_rx_ntuple_flow_spec fs; +}; + + +enum ethtool_flash_op_type { + ETHTOOL_FLASH_ALL_REGIONS = 0, +}; + + +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[128]; +}; +# 1165 "./include/uapi/linux/ethtool.h" +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; +# 1184 "./include/uapi/linux/ethtool.h" +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; +# 1199 "./include/uapi/linux/ethtool.h" +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + + + + + + +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + + + + + + + +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; +# 1245 "./include/uapi/linux/ethtool.h" +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; +# 1280 "./include/uapi/linux/ethtool.h" +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT, + ETHTOOL_F_WISH__BIT, + ETHTOOL_F_COMPAT__BIT, +}; +# 1299 "./include/uapi/linux/ethtool.h" +struct ethtool_per_queue_op { + __u32 cmd; + __u32 sub_command; + __u32 queue_mask[(((4096) + (32) - 1) / (32))]; + char data[]; +}; +# 1317 "./include/uapi/linux/ethtool.h" +struct ethtool_fecparam { + __u32 cmd; + + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; +# 1333 "./include/uapi/linux/ethtool.h" +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT, + ETHTOOL_FEC_AUTO_BIT, + ETHTOOL_FEC_OFF_BIT, + ETHTOOL_FEC_RS_BIT, + 
ETHTOOL_FEC_BASER_BIT, + ETHTOOL_FEC_LLRS_BIT, +}; +# 1449 "./include/uapi/linux/ethtool.h" +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + + + + + + + + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + 
ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + + __ETHTOOL_LINK_MODE_MASK_NBITS +}; +# 1647 "./include/uapi/linux/ethtool.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ethtool_validate_speed(__u32 speed) +{ + return speed <= ((int)(~0U >> 1)) || speed == (__u32)-1; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ethtool_validate_duplex(__u8 duplex) +{ + switch (duplex) { + case 0x00: + case 0x01: + case 0xff: + return 1; + } + + return 0; +} +# 1786 "./include/uapi/linux/ethtool.h" +enum ethtool_reset_flags { + + + + + + ETH_RESET_MGMT = 1 << 0, + ETH_RESET_IRQ = 1 << 1, + ETH_RESET_DMA = 1 << 2, + ETH_RESET_FILTER = 1 << 3, + ETH_RESET_OFFLOAD = 1 << 4, + ETH_RESET_MAC = 1 << 5, + ETH_RESET_PHY = 1 << 6, + ETH_RESET_RAM = 1 << 7, + + ETH_RESET_AP = 1 << 8, + + ETH_RESET_DEDICATED = 0x0000ffff, + + ETH_RESET_ALL = 0xffffffff, + +}; +# 1907 "./include/uapi/linux/ethtool.h" +struct ethtool_link_settings { + __u32 cmd; + __u32 speed; + __u8 duplex; + __u8 port; + __u8 phy_address; + __u8 autoneg; + __u8 mdio_support; + __u8 eth_tp_mdix; + __u8 eth_tp_mdix_ctrl; + __s8 link_mode_masks_nwords; + __u8 transceiver; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 reserved1[1]; + __u32 reserved[7]; + __u32 link_mode_masks[0]; + + + + + +}; +# 19 "./include/linux/ethtool.h" 2 + + + +struct compat_ethtool_rx_flow_spec { + u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + compat_u64 ring_cookie; + u32 location; +}; + +struct compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[]; +}; +# 54 "./include/linux/ethtool.h" +enum ethtool_phys_id_state { + ETHTOOL_ID_INACTIVE, + ETHTOOL_ID_ACTIVE, + ETHTOOL_ID_ON, + ETHTOOL_ID_OFF +}; + +enum { + ETH_RSS_HASH_TOP_BIT, + ETH_RSS_HASH_XOR_BIT, + ETH_RSS_HASH_CRC32_BIT, + + + + + + ETH_RSS_HASH_FUNCS_COUNT +}; +# 83 "./include/linux/ethtool.h" +struct net_device; + + +u32 ethtool_op_get_link(struct net_device *dev); +int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); +# 96 "./include/linux/ethtool.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + return index % n_rx_rings; +} +# 108 "./include/linux/ethtool.h" +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + unsigned long supported[(((__ETHTOOL_LINK_MODE_MASK_NBITS) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; + unsigned long advertising[(((__ETHTOOL_LINK_MODE_MASK_NBITS) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; + unsigned long lp_advertising[(((__ETHTOOL_LINK_MODE_MASK_NBITS) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; + } link_modes; +}; +# 159 "./include/linux/ethtool.h" +extern int +__ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings); +# 170 "./include/linux/ethtool.h" +void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); + +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32); + + +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src); +# 376 "./include/linux/ethtool.h" 
+struct ethtool_ops { + u32 supported_coalesce_params; + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, + struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, + struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state); + void (*get_ethtool_stats)(struct net_device *, + struct ethtool_stats *, u64 *); + int (*begin)(struct net_device *); + void (*complete)(struct net_device *); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + int (*get_rxnfc)(struct net_device *, + struct ethtool_rxnfc *, u32 *rule_locs); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); + int (*reset)(struct net_device *, u32 *); + u32 (*get_rxfh_key_size)(struct net_device *); + u32 (*get_rxfh_indir_size)(struct net_device *); + int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rxfh)(struct net_device *, const u32 *indir, + const u8 *key, const u8 hfunc); + int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc, u32 rss_context); + int (*set_rxfh_context)(struct net_device *, const u32 *indir, + const u8 *key, const u8 hfunc, + u32 *rss_context, bool delete); + void (*get_channels)(struct net_device *, struct ethtool_channels *); + int (*set_channels)(struct net_device *, struct ethtool_channels *); + int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); + int (*get_dump_data)(struct net_device *, + struct ethtool_dump *, void *); + int (*set_dump)(struct net_device *, struct ethtool_dump *); + int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + int (*get_module_info)(struct net_device *, + struct ethtool_modinfo *); + int (*get_module_eeprom)(struct net_device *, + struct ethtool_eeprom *, u8 *); + int (*get_eee)(struct net_device *, struct ethtool_eee *); + int (*set_eee)(struct net_device *, struct ethtool_eee *); + int (*get_tunable)(struct net_device *, + const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, + const struct ethtool_tunable *, const void *); + int (*get_per_queue_coalesce)(struct net_device *, u32, + struct ethtool_coalesce *); + int (*set_per_queue_coalesce)(struct net_device *, u32, + struct ethtool_coalesce *); + int (*get_link_ksettings)(struct 
net_device *, + struct ethtool_link_ksettings *); + int (*set_link_ksettings)(struct net_device *, + const struct ethtool_link_ksettings *); + int (*get_fecparam)(struct net_device *, + struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, + struct ethtool_fecparam *); + void (*get_ethtool_phy_stats)(struct net_device *, + struct ethtool_stats *, u64 *); +}; + +int ethtool_check_ops(const struct ethtool_ops *ops); + +struct ethtool_rx_flow_rule { + struct flow_rule *rule; + unsigned long priv[]; +}; + +struct ethtool_rx_flow_spec_input { + const struct ethtool_rx_flow_spec *fs; + u32 rss_ctx; +}; + +struct ethtool_rx_flow_rule * +ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input); +void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule); + +bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd); +int ethtool_virtdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd, + u32 *dev_speed, u8 *dev_duplex); +# 38 "./include/linux/netdevice.h" 2 +# 1 "./include/net/net_namespace.h" 1 +# 16 "./include/net/net_namespace.h" +# 1 "./include/net/netns/core.h" 1 + + + + +struct ctl_table_header; +struct prot_inuse; + +struct netns_core { + + struct ctl_table_header *sysctl_hdr; + + int sysctl_somaxconn; + + + int *sock_inuse; + struct prot_inuse *prot_inuse; + +}; +# 17 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/mib.h" 1 + + + + +# 1 "./include/net/snmp.h" 1 +# 18 "./include/net/snmp.h" +# 1 "./include/uapi/linux/snmp.h" 1 +# 19 "./include/uapi/linux/snmp.h" +enum +{ + IPSTATS_MIB_NUM = 0, + + IPSTATS_MIB_INPKTS, + IPSTATS_MIB_INOCTETS, + IPSTATS_MIB_INDELIVERS, + IPSTATS_MIB_OUTFORWDATAGRAMS, + IPSTATS_MIB_OUTPKTS, + IPSTATS_MIB_OUTOCTETS, + + IPSTATS_MIB_INHDRERRORS, + IPSTATS_MIB_INTOOBIGERRORS, + IPSTATS_MIB_INNOROUTES, + IPSTATS_MIB_INADDRERRORS, + IPSTATS_MIB_INUNKNOWNPROTOS, + IPSTATS_MIB_INTRUNCATEDPKTS, + IPSTATS_MIB_INDISCARDS, + IPSTATS_MIB_OUTDISCARDS, + IPSTATS_MIB_OUTNOROUTES, + IPSTATS_MIB_REASMTIMEOUT, + IPSTATS_MIB_REASMREQDS, + IPSTATS_MIB_REASMOKS, + IPSTATS_MIB_REASMFAILS, + IPSTATS_MIB_FRAGOKS, + IPSTATS_MIB_FRAGFAILS, + IPSTATS_MIB_FRAGCREATES, + IPSTATS_MIB_INMCASTPKTS, + IPSTATS_MIB_OUTMCASTPKTS, + IPSTATS_MIB_INBCASTPKTS, + IPSTATS_MIB_OUTBCASTPKTS, + IPSTATS_MIB_INMCASTOCTETS, + IPSTATS_MIB_OUTMCASTOCTETS, + IPSTATS_MIB_INBCASTOCTETS, + IPSTATS_MIB_OUTBCASTOCTETS, + IPSTATS_MIB_CSUMERRORS, + IPSTATS_MIB_NOECTPKTS, + IPSTATS_MIB_ECT1PKTS, + IPSTATS_MIB_ECT0PKTS, + IPSTATS_MIB_CEPKTS, + IPSTATS_MIB_REASM_OVERLAPS, + __IPSTATS_MIB_MAX +}; + + + + + + +enum +{ + ICMP_MIB_NUM = 0, + ICMP_MIB_INMSGS, + ICMP_MIB_INERRORS, + ICMP_MIB_INDESTUNREACHS, + ICMP_MIB_INTIMEEXCDS, + ICMP_MIB_INPARMPROBS, + ICMP_MIB_INSRCQUENCHS, + ICMP_MIB_INREDIRECTS, + ICMP_MIB_INECHOS, + ICMP_MIB_INECHOREPS, + ICMP_MIB_INTIMESTAMPS, + ICMP_MIB_INTIMESTAMPREPS, + ICMP_MIB_INADDRMASKS, + ICMP_MIB_INADDRMASKREPS, + ICMP_MIB_OUTMSGS, + ICMP_MIB_OUTERRORS, + ICMP_MIB_OUTDESTUNREACHS, + ICMP_MIB_OUTTIMEEXCDS, + ICMP_MIB_OUTPARMPROBS, + ICMP_MIB_OUTSRCQUENCHS, + ICMP_MIB_OUTREDIRECTS, + ICMP_MIB_OUTECHOS, + ICMP_MIB_OUTECHOREPS, + ICMP_MIB_OUTTIMESTAMPS, + ICMP_MIB_OUTTIMESTAMPREPS, + ICMP_MIB_OUTADDRMASKS, + ICMP_MIB_OUTADDRMASKREPS, + ICMP_MIB_CSUMERRORS, + __ICMP_MIB_MAX +}; + + + + + + + +enum +{ + ICMP6_MIB_NUM = 0, + ICMP6_MIB_INMSGS, + ICMP6_MIB_INERRORS, + ICMP6_MIB_OUTMSGS, + ICMP6_MIB_OUTERRORS, + ICMP6_MIB_CSUMERRORS, + __ICMP6_MIB_MAX +}; +# 125 "./include/uapi/linux/snmp.h" 
+enum +{ + TCP_MIB_NUM = 0, + TCP_MIB_RTOALGORITHM, + TCP_MIB_RTOMIN, + TCP_MIB_RTOMAX, + TCP_MIB_MAXCONN, + TCP_MIB_ACTIVEOPENS, + TCP_MIB_PASSIVEOPENS, + TCP_MIB_ATTEMPTFAILS, + TCP_MIB_ESTABRESETS, + TCP_MIB_CURRESTAB, + TCP_MIB_INSEGS, + TCP_MIB_OUTSEGS, + TCP_MIB_RETRANSSEGS, + TCP_MIB_INERRS, + TCP_MIB_OUTRSTS, + TCP_MIB_CSUMERRORS, + __TCP_MIB_MAX +}; + + + + + + +enum +{ + UDP_MIB_NUM = 0, + UDP_MIB_INDATAGRAMS, + UDP_MIB_NOPORTS, + UDP_MIB_INERRORS, + UDP_MIB_OUTDATAGRAMS, + UDP_MIB_RCVBUFERRORS, + UDP_MIB_SNDBUFERRORS, + UDP_MIB_CSUMERRORS, + UDP_MIB_IGNOREDMULTI, + __UDP_MIB_MAX +}; + + +enum +{ + LINUX_MIB_NUM = 0, + LINUX_MIB_SYNCOOKIESSENT, + LINUX_MIB_SYNCOOKIESRECV, + LINUX_MIB_SYNCOOKIESFAILED, + LINUX_MIB_EMBRYONICRSTS, + LINUX_MIB_PRUNECALLED, + LINUX_MIB_RCVPRUNED, + LINUX_MIB_OFOPRUNED, + LINUX_MIB_OUTOFWINDOWICMPS, + LINUX_MIB_LOCKDROPPEDICMPS, + LINUX_MIB_ARPFILTER, + LINUX_MIB_TIMEWAITED, + LINUX_MIB_TIMEWAITRECYCLED, + LINUX_MIB_TIMEWAITKILLED, + LINUX_MIB_PAWSACTIVEREJECTED, + LINUX_MIB_PAWSESTABREJECTED, + LINUX_MIB_DELAYEDACKS, + LINUX_MIB_DELAYEDACKLOCKED, + LINUX_MIB_DELAYEDACKLOST, + LINUX_MIB_LISTENOVERFLOWS, + LINUX_MIB_LISTENDROPS, + LINUX_MIB_TCPHPHITS, + LINUX_MIB_TCPPUREACKS, + LINUX_MIB_TCPHPACKS, + LINUX_MIB_TCPRENORECOVERY, + LINUX_MIB_TCPSACKRECOVERY, + LINUX_MIB_TCPSACKRENEGING, + LINUX_MIB_TCPSACKREORDER, + LINUX_MIB_TCPRENOREORDER, + LINUX_MIB_TCPTSREORDER, + LINUX_MIB_TCPFULLUNDO, + LINUX_MIB_TCPPARTIALUNDO, + LINUX_MIB_TCPDSACKUNDO, + LINUX_MIB_TCPLOSSUNDO, + LINUX_MIB_TCPLOSTRETRANSMIT, + LINUX_MIB_TCPRENOFAILURES, + LINUX_MIB_TCPSACKFAILURES, + LINUX_MIB_TCPLOSSFAILURES, + LINUX_MIB_TCPFASTRETRANS, + LINUX_MIB_TCPSLOWSTARTRETRANS, + LINUX_MIB_TCPTIMEOUTS, + LINUX_MIB_TCPLOSSPROBES, + LINUX_MIB_TCPLOSSPROBERECOVERY, + LINUX_MIB_TCPRENORECOVERYFAIL, + LINUX_MIB_TCPSACKRECOVERYFAIL, + LINUX_MIB_TCPRCVCOLLAPSED, + LINUX_MIB_TCPDSACKOLDSENT, + LINUX_MIB_TCPDSACKOFOSENT, + LINUX_MIB_TCPDSACKRECV, + LINUX_MIB_TCPDSACKOFORECV, + LINUX_MIB_TCPABORTONDATA, + LINUX_MIB_TCPABORTONCLOSE, + LINUX_MIB_TCPABORTONMEMORY, + LINUX_MIB_TCPABORTONTIMEOUT, + LINUX_MIB_TCPABORTONLINGER, + LINUX_MIB_TCPABORTFAILED, + LINUX_MIB_TCPMEMORYPRESSURES, + LINUX_MIB_TCPMEMORYPRESSURESCHRONO, + LINUX_MIB_TCPSACKDISCARD, + LINUX_MIB_TCPDSACKIGNOREDOLD, + LINUX_MIB_TCPDSACKIGNOREDNOUNDO, + LINUX_MIB_TCPSPURIOUSRTOS, + LINUX_MIB_TCPMD5NOTFOUND, + LINUX_MIB_TCPMD5UNEXPECTED, + LINUX_MIB_TCPMD5FAILURE, + LINUX_MIB_SACKSHIFTED, + LINUX_MIB_SACKMERGED, + LINUX_MIB_SACKSHIFTFALLBACK, + LINUX_MIB_TCPBACKLOGDROP, + LINUX_MIB_PFMEMALLOCDROP, + LINUX_MIB_TCPMINTTLDROP, + LINUX_MIB_TCPDEFERACCEPTDROP, + LINUX_MIB_IPRPFILTER, + LINUX_MIB_TCPTIMEWAITOVERFLOW, + LINUX_MIB_TCPREQQFULLDOCOOKIES, + LINUX_MIB_TCPREQQFULLDROP, + LINUX_MIB_TCPRETRANSFAIL, + LINUX_MIB_TCPRCVCOALESCE, + LINUX_MIB_TCPBACKLOGCOALESCE, + LINUX_MIB_TCPOFOQUEUE, + LINUX_MIB_TCPOFODROP, + LINUX_MIB_TCPOFOMERGE, + LINUX_MIB_TCPCHALLENGEACK, + LINUX_MIB_TCPSYNCHALLENGE, + LINUX_MIB_TCPFASTOPENACTIVE, + LINUX_MIB_TCPFASTOPENACTIVEFAIL, + LINUX_MIB_TCPFASTOPENPASSIVE, + LINUX_MIB_TCPFASTOPENPASSIVEFAIL, + LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, + LINUX_MIB_TCPFASTOPENCOOKIEREQD, + LINUX_MIB_TCPFASTOPENBLACKHOLE, + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, + LINUX_MIB_BUSYPOLLRXPACKETS, + LINUX_MIB_TCPAUTOCORKING, + LINUX_MIB_TCPFROMZEROWINDOWADV, + LINUX_MIB_TCPTOZEROWINDOWADV, + LINUX_MIB_TCPWANTZEROWINDOWADV, + LINUX_MIB_TCPSYNRETRANS, + LINUX_MIB_TCPORIGDATASENT, + LINUX_MIB_TCPHYSTARTTRAINDETECT, + 
LINUX_MIB_TCPHYSTARTTRAINCWND, + LINUX_MIB_TCPHYSTARTDELAYDETECT, + LINUX_MIB_TCPHYSTARTDELAYCWND, + LINUX_MIB_TCPACKSKIPPEDSYNRECV, + LINUX_MIB_TCPACKSKIPPEDPAWS, + LINUX_MIB_TCPACKSKIPPEDSEQ, + LINUX_MIB_TCPACKSKIPPEDFINWAIT2, + LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, + LINUX_MIB_TCPACKSKIPPEDCHALLENGE, + LINUX_MIB_TCPWINPROBE, + LINUX_MIB_TCPKEEPALIVE, + LINUX_MIB_TCPMTUPFAIL, + LINUX_MIB_TCPMTUPSUCCESS, + LINUX_MIB_TCPDELIVERED, + LINUX_MIB_TCPDELIVEREDCE, + LINUX_MIB_TCPACKCOMPRESSED, + LINUX_MIB_TCPZEROWINDOWDROP, + LINUX_MIB_TCPRCVQDROP, + LINUX_MIB_TCPWQUEUETOOBIG, + LINUX_MIB_TCPFASTOPENPASSIVEALTKEY, + LINUX_MIB_TCPTIMEOUTREHASH, + LINUX_MIB_TCPDUPLICATEDATAREHASH, + __LINUX_MIB_MAX +}; + + +enum +{ + LINUX_MIB_XFRMNUM = 0, + LINUX_MIB_XFRMINERROR, + LINUX_MIB_XFRMINBUFFERERROR, + LINUX_MIB_XFRMINHDRERROR, + LINUX_MIB_XFRMINNOSTATES, + LINUX_MIB_XFRMINSTATEPROTOERROR, + LINUX_MIB_XFRMINSTATEMODEERROR, + LINUX_MIB_XFRMINSTATESEQERROR, + LINUX_MIB_XFRMINSTATEEXPIRED, + LINUX_MIB_XFRMINSTATEMISMATCH, + LINUX_MIB_XFRMINSTATEINVALID, + LINUX_MIB_XFRMINTMPLMISMATCH, + LINUX_MIB_XFRMINNOPOLS, + LINUX_MIB_XFRMINPOLBLOCK, + LINUX_MIB_XFRMINPOLERROR, + LINUX_MIB_XFRMOUTERROR, + LINUX_MIB_XFRMOUTBUNDLEGENERROR, + LINUX_MIB_XFRMOUTBUNDLECHECKERROR, + LINUX_MIB_XFRMOUTNOSTATES, + LINUX_MIB_XFRMOUTSTATEPROTOERROR, + LINUX_MIB_XFRMOUTSTATEMODEERROR, + LINUX_MIB_XFRMOUTSTATESEQERROR, + LINUX_MIB_XFRMOUTSTATEEXPIRED, + LINUX_MIB_XFRMOUTPOLBLOCK, + LINUX_MIB_XFRMOUTPOLDEAD, + LINUX_MIB_XFRMOUTPOLERROR, + LINUX_MIB_XFRMFWDHDRERROR, + LINUX_MIB_XFRMOUTSTATEINVALID, + LINUX_MIB_XFRMACQUIREERROR, + __LINUX_MIB_XFRMMAX +}; + + +enum +{ + LINUX_MIB_TLSNUM = 0, + LINUX_MIB_TLSCURRTXSW, + LINUX_MIB_TLSCURRRXSW, + LINUX_MIB_TLSCURRTXDEVICE, + LINUX_MIB_TLSCURRRXDEVICE, + LINUX_MIB_TLSTXSW, + LINUX_MIB_TLSRXSW, + LINUX_MIB_TLSTXDEVICE, + LINUX_MIB_TLSRXDEVICE, + LINUX_MIB_TLSDECRYPTERROR, + LINUX_MIB_TLSRXDEVICERESYNC, + __LINUX_MIB_TLSMAX +}; +# 19 "./include/net/snmp.h" 2 +# 29 "./include/net/snmp.h" +struct snmp_mib { + const char *name; + int entry; +}; +# 51 "./include/net/snmp.h" +struct ipstats_mib { + + u64 mibs[__IPSTATS_MIB_MAX]; + struct u64_stats_sync syncp; +}; + + + +struct icmp_mib { + unsigned long mibs[__ICMP_MIB_MAX]; +}; + + +struct icmpmsg_mib { + atomic_long_t mibs[512]; +}; + + + + +struct icmpv6_mib { + unsigned long mibs[__ICMP6_MIB_MAX]; +}; + +struct icmpv6_mib_device { + atomic_long_t mibs[__ICMP6_MIB_MAX]; +}; + + + +struct icmpv6msg_mib { + atomic_long_t mibs[512]; +}; + +struct icmpv6msg_mib_device { + atomic_long_t mibs[512]; +}; + + + + +struct tcp_mib { + unsigned long mibs[__TCP_MIB_MAX]; +}; + + + +struct udp_mib { + unsigned long mibs[__UDP_MIB_MAX]; +}; + + + +struct linux_mib { + unsigned long mibs[__LINUX_MIB_MAX]; +}; + + + +struct linux_xfrm_mib { + unsigned long mibs[__LINUX_MIB_XFRMMAX]; +}; + + + +struct linux_tls_mib { + unsigned long mibs[__LINUX_MIB_TLSMAX]; +}; +# 6 "./include/net/netns/mib.h" 2 + +struct netns_mib { + __typeof__(struct tcp_mib) *tcp_statistics; + __typeof__(struct ipstats_mib) *ip_statistics; + __typeof__(struct linux_mib) *net_statistics; + __typeof__(struct udp_mib) *udp_statistics; + __typeof__(struct udp_mib) *udplite_statistics; + __typeof__(struct icmp_mib) *icmp_statistics; + __typeof__(struct icmpmsg_mib) *icmpmsg_statistics; + + + struct proc_dir_entry *proc_net_devsnmp6; + __typeof__(struct udp_mib) *udp_stats_in6; + __typeof__(struct udp_mib) *udplite_stats_in6; + __typeof__(struct ipstats_mib) *ipv6_statistics; + __typeof__(struct 
icmpv6_mib) *icmpv6_statistics; + __typeof__(struct icmpv6msg_mib) *icmpv6msg_statistics; + + + __typeof__(struct linux_xfrm_mib) *xfrm_statistics; + + + __typeof__(struct linux_tls_mib) *tls_statistics; + + + __typeof__(struct mptcp_mib) *mptcp_statistics; + +}; +# 18 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/unix.h" 1 + + + + + + + +struct ctl_table_header; +struct netns_unix { + int sysctl_max_dgram_qlen; + struct ctl_table_header *ctl; +}; +# 19 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/packet.h" 1 +# 11 "./include/net/netns/packet.h" +struct netns_packet { + struct mutex sklist_lock; + struct hlist_head sklist; +}; +# 20 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/ipv4.h" 1 +# 10 "./include/net/netns/ipv4.h" +# 1 "./include/net/inet_frag.h" 1 +# 9 "./include/net/inet_frag.h" +struct fqdir { + + long high_thresh; + long low_thresh; + int timeout; + int max_dist; + struct inet_frags *f; + struct net *net; + bool dead; + + struct rhashtable rhashtable __attribute__((__aligned__((1 << (6))))); + + + atomic_long_t mem __attribute__((__aligned__((1 << (6))))); + struct work_struct destroy_work; +}; +# 34 "./include/net/inet_frag.h" +enum { + INET_FRAG_FIRST_IN = ((((1UL))) << (0)), + INET_FRAG_LAST_IN = ((((1UL))) << (1)), + INET_FRAG_COMPLETE = ((((1UL))) << (2)), + INET_FRAG_HASH_DEAD = ((((1UL))) << (3)), +}; + +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; +# 77 "./include/net/inet_frag.h" +struct inet_frag_queue { + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; + struct timer_list timer; + spinlock_t lock; + refcount_t refcnt; + struct rb_root rb_fragments; + struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; + ktime_t stamp; + int len; + int meat; + __u8 flags; + u16 max_size; + struct fqdir *fqdir; + struct callback_head rcu; +}; + +struct inet_frags { + unsigned int qsize; + + void (*constructor)(struct inet_frag_queue *q, + const void *arg); + void (*destructor)(struct inet_frag_queue *); + void (*frag_expire)(struct timer_list *t); + struct kmem_cache *frags_cachep; + const char *frags_cache_name; + struct rhashtable_params rhash_params; + refcount_t refcnt; + struct completion completion; +}; + +int inet_frags_init(struct inet_frags *); +void inet_frags_fini(struct inet_frags *); + +int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fqdir_pre_exit(struct fqdir *fqdir) +{ + fqdir->high_thresh = 0; + fqdir->dead = true; +} +void fqdir_exit(struct fqdir *fqdir); + +void inet_frag_kill(struct inet_frag_queue *q); +void inet_frag_destroy(struct inet_frag_queue *q); +struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key); + + +unsigned int inet_frag_rbtree_purge(struct rb_root *root); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inet_frag_put(struct inet_frag_queue *q) +{ + if (refcount_dec_and_test(&q->refcnt)) + inet_frag_destroy(q); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long frag_mem_limit(const struct fqdir *fqdir) +{ + return 
atomic_long_read(&fqdir->mem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sub_frag_mem_limit(struct fqdir *fqdir, long val) +{ + atomic_long_sub(val, &fqdir->mem); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void add_frag_mem_limit(struct fqdir *fqdir, long val) +{ + atomic_long_add(val, &fqdir->mem); +} +# 163 "./include/net/inet_frag.h" +extern const u8 ip_frag_ecn_table[16]; + + + + + +int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb, + int offset, int end); +void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, + struct sk_buff *parent); +void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, + void *reasm_data, bool try_coalesce); +struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); +# 11 "./include/net/netns/ipv4.h" 2 + + + +struct tcpm_hash_bucket; +struct ctl_table_header; +struct ipv4_devconf; +struct fib_rules_ops; +struct hlist_head; +struct fib_table; +struct sock; +struct local_ports { + seqlock_t lock; + int range[2]; + bool warned; +}; + +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + +struct inet_hashinfo; + +struct inet_timewait_death_row { + atomic_t tw_count; + + struct inet_hashinfo *hashinfo __attribute__((__aligned__((1 << (6))))); + int sysctl_max_tw_buckets; +}; + +struct tcp_fastopen_context; + +struct netns_ipv4 { + + struct ctl_table_header *forw_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *ipv4_hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *xfrm4_hdr; + + struct ipv4_devconf *devconf_all; + struct ipv4_devconf *devconf_dflt; + struct ip_ra_chain *ra_chain; + struct mutex ra_mutex; + + struct fib_rules_ops *rules_ops; + bool fib_has_custom_rules; + unsigned int fib_rules_require_fldissect; + struct fib_table *fib_main; + struct fib_table *fib_default; + + bool fib_has_custom_local_routes; + + int fib_num_tclassid_users; + + struct hlist_head *fib_table_hash; + bool fib_offload_disabled; + struct sock *fibnl; + + struct sock * *icmp_sk; + struct sock *mc_autojoin_sk; + + struct inet_peer_base *peers; + struct sock * *tcp_sk; + struct fqdir *fqdir; + + struct xt_table *iptable_filter; + struct xt_table *iptable_mangle; + struct xt_table *iptable_raw; + struct xt_table *arptable_filter; + + struct xt_table *iptable_security; + + struct xt_table *nat_table; + + + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; + int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; + int sysctl_icmp_errors_use_inbound_ifaddr; + + struct local_ports ip_local_ports; + + int sysctl_tcp_ecn; + int sysctl_tcp_ecn_fallback; + + int sysctl_ip_default_ttl; + int sysctl_ip_no_pmtu_disc; + int sysctl_ip_fwd_use_pmtu; + int sysctl_ip_fwd_update_priority; + int sysctl_ip_nonlocal_bind; + int sysctl_ip_autobind_reuse; + + int sysctl_ip_dynaddr; + int sysctl_ip_early_demux; + + int sysctl_raw_l3mdev_accept; + + int sysctl_tcp_early_demux; + int sysctl_udp_early_demux; + + int sysctl_nexthop_compat_mode; + + int sysctl_fwmark_reflect; + int sysctl_tcp_fwmark_accept; + + int sysctl_tcp_l3mdev_accept; + + int sysctl_tcp_mtu_probing; + int sysctl_tcp_mtu_probe_floor; + int sysctl_tcp_base_mss; + int sysctl_tcp_min_snd_mss; + int sysctl_tcp_probe_threshold; + u32 sysctl_tcp_probe_interval; + + int 
sysctl_tcp_keepalive_time; + int sysctl_tcp_keepalive_probes; + int sysctl_tcp_keepalive_intvl; + + int sysctl_tcp_syn_retries; + int sysctl_tcp_synack_retries; + int sysctl_tcp_syncookies; + int sysctl_tcp_reordering; + int sysctl_tcp_retries1; + int sysctl_tcp_retries2; + int sysctl_tcp_orphan_retries; + int sysctl_tcp_fin_timeout; + unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_tw_reuse; + int sysctl_tcp_sack; + int sysctl_tcp_window_scaling; + int sysctl_tcp_timestamps; + int sysctl_tcp_early_retrans; + int sysctl_tcp_recovery; + int sysctl_tcp_thin_linear_timeouts; + int sysctl_tcp_slow_start_after_idle; + int sysctl_tcp_retrans_collapse; + int sysctl_tcp_stdurg; + int sysctl_tcp_rfc1337; + int sysctl_tcp_abort_on_overflow; + int sysctl_tcp_fack; + int sysctl_tcp_max_reordering; + int sysctl_tcp_dsack; + int sysctl_tcp_app_win; + int sysctl_tcp_adv_win_scale; + int sysctl_tcp_frto; + int sysctl_tcp_nometrics_save; + int sysctl_tcp_no_ssthresh_metrics_save; + int sysctl_tcp_moderate_rcvbuf; + int sysctl_tcp_tso_win_divisor; + int sysctl_tcp_workaround_signed_windows; + int sysctl_tcp_limit_output_bytes; + int sysctl_tcp_challenge_ack_limit; + int sysctl_tcp_min_tso_segs; + int sysctl_tcp_min_rtt_wlen; + int sysctl_tcp_autocorking; + int sysctl_tcp_invalid_ratelimit; + int sysctl_tcp_pacing_ss_ratio; + int sysctl_tcp_pacing_ca_ratio; + int sysctl_tcp_wmem[3]; + int sysctl_tcp_rmem[3]; + int sysctl_tcp_comp_sack_nr; + unsigned long sysctl_tcp_comp_sack_delay_ns; + unsigned long sysctl_tcp_comp_sack_slack_ns; + struct inet_timewait_death_row tcp_death_row; + int sysctl_max_syn_backlog; + int sysctl_tcp_fastopen; + const struct tcp_congestion_ops *tcp_congestion_control; + struct tcp_fastopen_context *tcp_fastopen_ctx; + spinlock_t tcp_fastopen_ctx_lock; + unsigned int sysctl_tcp_fastopen_blackhole_timeout; + atomic_t tfo_active_disable_times; + unsigned long tfo_active_disable_stamp; + + int sysctl_udp_wmem_min; + int sysctl_udp_rmem_min; + + + int sysctl_udp_l3mdev_accept; + + + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; + int sysctl_igmp_llm_reports; + int sysctl_igmp_qrv; + + struct ping_group_range ping_group_range; + + atomic_t dev_addr_genid; + + + unsigned long *sysctl_local_reserved_ports; + int sysctl_ip_prot_sock; + + + + + + + struct list_head mr_tables; + struct fib_rules_ops *mr_rules_ops; + + + + int sysctl_fib_multipath_use_neigh; + int sysctl_fib_multipath_hash_policy; + + + struct fib_notifier_ops *notifier_ops; + unsigned int fib_seq; + + struct fib_notifier_ops *ipmr_notifier_ops; + unsigned int ipmr_seq; + + atomic_t rt_genid; + siphash_key_t ip_id_key; +}; +# 21 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/ipv6.h" 1 +# 10 "./include/net/netns/ipv6.h" +# 1 "./include/net/dst_ops.h" 1 + + + + + + + +struct dst_entry; +struct kmem_cachep; +struct net_device; +struct sk_buff; +struct sock; +struct net; + +struct dst_ops { + unsigned short family; + unsigned int gc_thresh; + + int (*gc)(struct dst_ops *ops); + struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); + unsigned int (*default_advmss)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); + u32 * (*cow_metrics)(struct dst_entry *, unsigned long); + void (*destroy)(struct dst_entry *); + void (*ifdown)(struct dst_entry *, + struct net_device *dev, int how); + struct dst_entry * (*negative_advice)(struct dst_entry *); + void (*link_failure)(struct sk_buff *); + void (*update_pmtu)(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb, 
u32 mtu, + bool confirm_neigh); + void (*redirect)(struct dst_entry *dst, struct sock *sk, + struct sk_buff *skb); + int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb); + struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, + struct sk_buff *skb, + const void *daddr); + void (*confirm_neigh)(const struct dst_entry *dst, + const void *daddr); + + struct kmem_cache *kmem_cachep; + + struct percpu_counter pcpuc_entries __attribute__((__aligned__((1 << (6))))); +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dst_entries_get_fast(struct dst_ops *dst) +{ + return percpu_counter_read_positive(&dst->pcpuc_entries); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dst_entries_get_slow(struct dst_ops *dst) +{ + return percpu_counter_sum_positive(&dst->pcpuc_entries); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_entries_add(struct dst_ops *dst, int val) +{ + percpu_counter_add_batch(&dst->pcpuc_entries, val, + 32); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dst_entries_init(struct dst_ops *dst) +{ + return ({ static struct lock_class_key __key; __percpu_counter_init(&dst->pcpuc_entries, 0, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)), &__key); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_entries_destroy(struct dst_ops *dst) +{ + percpu_counter_destroy(&dst->pcpuc_entries); +} +# 11 "./include/net/netns/ipv6.h" 2 +# 1 "./include/uapi/linux/icmpv6.h" 1 + + + + + + + +struct icmp6hdr { + + __u8 icmp6_type; + __u8 icmp6_code; + __sum16 icmp6_cksum; + + + union { + __be32 un_data32[1]; + __be16 un_data16[2]; + __u8 un_data8[4]; + + struct icmpv6_echo { + __be16 identifier; + __be16 sequence; + } u_echo; + + struct icmpv6_nd_advt { + + __u32 reserved:5, + override:1, + solicited:1, + router:1, + reserved2:24; +# 40 "./include/uapi/linux/icmpv6.h" + } u_nd_advt; + + struct icmpv6_nd_ra { + __u8 hop_limit; + + __u8 reserved:3, + router_pref:2, + home_agent:1, + other:1, + managed:1; +# 60 "./include/uapi/linux/icmpv6.h" + __be16 rt_lifetime; + } u_nd_ra; + + } icmp6_dataun; +# 80 "./include/uapi/linux/icmpv6.h" +}; +# 156 "./include/uapi/linux/icmpv6.h" +struct icmp6_filter { + __u32 data[8]; +}; +# 12 "./include/net/netns/ipv6.h" 2 + +struct ctl_table_header; + +struct netns_sysctl_ipv6 { + + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; + struct ctl_table_header *frags_hdr; + struct ctl_table_header *xfrm6_hdr; + + int bindv6only; + int flush_delay; + int ip6_rt_max_size; + int ip6_rt_gc_min_interval; + int ip6_rt_gc_timeout; + int ip6_rt_gc_interval; + int ip6_rt_gc_elasticity; + int ip6_rt_mtu_expires; + int ip6_rt_min_advmss; + int multipath_hash_policy; + int flowlabel_consistency; + int auto_flowlabels; + int icmpv6_time; + int icmpv6_echo_ignore_all; + int icmpv6_echo_ignore_multicast; + int icmpv6_echo_ignore_anycast; + unsigned long icmpv6_ratemask[(((255 + 1) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; + unsigned long *icmpv6_ratemask_ptr; + int anycast_src_echo_reply; + int ip_nonlocal_bind; + int fwmark_reflect; + int idgen_retries; + int idgen_delay; + int 
flowlabel_state_ranges; + int flowlabel_reflect; + int max_dst_opts_cnt; + int max_hbh_opts_cnt; + int max_dst_opts_len; + int max_hbh_opts_len; + int seg6_flowlabel; + bool skip_notify_on_dev_down; +}; + +struct netns_ipv6 { + struct netns_sysctl_ipv6 sysctl; + struct ipv6_devconf *devconf_all; + struct ipv6_devconf *devconf_dflt; + struct inet_peer_base *peers; + struct fqdir *fqdir; + + struct xt_table *ip6table_filter; + struct xt_table *ip6table_mangle; + struct xt_table *ip6table_raw; + + struct xt_table *ip6table_security; + + struct xt_table *ip6table_nat; + + struct fib6_info *fib6_null_entry; + struct rt6_info *ip6_null_entry; + struct rt6_statistics *rt6_stats; + struct timer_list ip6_fib_timer; + struct hlist_head *fib_table_hash; + struct fib6_table *fib6_main_tbl; + struct list_head fib6_walkers; + struct dst_ops ip6_dst_ops; + rwlock_t fib6_walker_lock; + spinlock_t fib6_gc_lock; + unsigned int ip6_rt_gc_expire; + unsigned long ip6_rt_last_gc; + + unsigned int fib6_rules_require_fldissect; + bool fib6_has_custom_rules; + + unsigned int fib6_routes_require_src; + + struct rt6_info *ip6_prohibit_entry; + struct rt6_info *ip6_blk_hole_entry; + struct fib6_table *fib6_local_tbl; + struct fib_rules_ops *fib6_rules_ops; + + struct sock * *icmp_sk; + struct sock *ndisc_sk; + struct sock *tcp_sk; + struct sock *igmp_sk; + struct sock *mc_autojoin_sk; + + + + + struct list_head mr6_tables; + struct fib_rules_ops *mr6_rules_ops; + + + atomic_t dev_addr_genid; + atomic_t fib6_sernum; + struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; + struct fib_notifier_ops *ip6mr_notifier_ops; + unsigned int ipmr_seq; + struct { + struct hlist_head head; + spinlock_t lock; + u32 seq; + } ip6addrlbl_table; +}; + + +struct netns_nf_frag { + struct fqdir *fqdir; +}; +# 22 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/nexthop.h" 1 +# 11 "./include/net/netns/nexthop.h" +struct netns_nexthop { + struct rb_root rb_root; + struct hlist_head *devhash; + + unsigned int seq; + u32 last_id_allocated; + struct atomic_notifier_head notifier_chain; +}; +# 23 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/ieee802154_6lowpan.h" 1 +# 11 "./include/net/netns/ieee802154_6lowpan.h" +struct netns_sysctl_lowpan { + + struct ctl_table_header *frags_hdr; + +}; + +struct netns_ieee802154_lowpan { + struct netns_sysctl_lowpan sysctl; + struct fqdir *fqdir; +}; +# 24 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/sctp.h" 1 + + + + +struct sock; +struct proc_dir_entry; +struct sctp_mib; +struct ctl_table_header; + +struct netns_sctp { + __typeof__(struct sctp_mib) *sctp_statistics; + + + struct proc_dir_entry *proc_net_sctp; + + + struct ctl_table_header *sysctl_header; + + + + + + struct sock *ctl_sock; + + + + + + + + struct list_head local_addr_list; + struct list_head addr_waitq; + struct timer_list addr_wq_timer; + struct list_head auto_asconf_splist; + + spinlock_t addr_wq_lock; + + + spinlock_t local_addr_lock; +# 51 "./include/net/netns/sctp.h" + unsigned int rto_initial; + unsigned int rto_min; + unsigned int rto_max; + + + + + int rto_alpha; + int rto_beta; + + + int max_burst; + + + int cookie_preserve_enable; + + + char *sctp_hmac_alg; + + + unsigned int valid_cookie_life; + + + unsigned int sack_timeout; + + + unsigned int hb_interval; + + + + + + int max_retrans_association; + int max_retrans_path; + int max_retrans_init; + + + + + int pf_retrans; + + + + + + int ps_retrans; + + + + + + + int pf_enable; + + + + + + + + int pf_expose; + + + + + + + 
int sndbuf_policy; + + + + + + + int rcvbuf_policy; + + int default_auto_asconf; + + + int addip_enable; + int addip_noauth; + + + int prsctp_enable; + + + int reconf_enable; + + + int auth_enable; + + + int intl_enable; + + + int ecn_enable; +# 155 "./include/net/netns/sctp.h" + int scope_policy; + + + + + int rwnd_upd_shift; + + + unsigned long max_autoclose; +}; +# 25 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/dccp.h" 1 + + + + +struct sock; + +struct netns_dccp { + struct sock *v4_ctl_sk; + struct sock *v6_ctl_sk; +}; +# 26 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/netfilter.h" 1 + + + + +# 1 "./include/linux/netfilter_defs.h" 1 + + + + +# 1 "./include/uapi/linux/netfilter.h" 1 + + + + + + +# 1 "./include/linux/in.h" 1 +# 19 "./include/linux/in.h" +# 1 "./include/uapi/linux/in.h" 1 +# 28 "./include/uapi/linux/in.h" +enum { + IPPROTO_IP = 0, + + IPPROTO_ICMP = 1, + + IPPROTO_IGMP = 2, + + IPPROTO_IPIP = 4, + + IPPROTO_TCP = 6, + + IPPROTO_EGP = 8, + + IPPROTO_PUP = 12, + + IPPROTO_UDP = 17, + + IPPROTO_IDP = 22, + + IPPROTO_TP = 29, + + IPPROTO_DCCP = 33, + + IPPROTO_IPV6 = 41, + + IPPROTO_RSVP = 46, + + IPPROTO_GRE = 47, + + IPPROTO_ESP = 50, + + IPPROTO_AH = 51, + + IPPROTO_MTP = 92, + + IPPROTO_BEETPH = 94, + + IPPROTO_ENCAP = 98, + + IPPROTO_PIM = 103, + + IPPROTO_COMP = 108, + + IPPROTO_SCTP = 132, + + IPPROTO_UDPLITE = 136, + + IPPROTO_MPLS = 137, + + IPPROTO_ETHERNET = 143, + + IPPROTO_RAW = 255, + + IPPROTO_MPTCP = 262, + + IPPROTO_MAX +}; + + + + +struct in_addr { + __be32 s_addr; +}; +# 172 "./include/uapi/linux/in.h" +struct ip_mreq { + struct in_addr imr_multiaddr; + struct in_addr imr_interface; +}; + +struct ip_mreqn { + struct in_addr imr_multiaddr; + struct in_addr imr_address; + int imr_ifindex; +}; + +struct ip_mreq_source { + __be32 imr_multiaddr; + __be32 imr_interface; + __be32 imr_sourceaddr; +}; + +struct ip_msfilter { + __be32 imsf_multiaddr; + __be32 imsf_interface; + __u32 imsf_fmode; + __u32 imsf_numsrc; + __be32 imsf_slist[1]; +}; + + + + + +struct group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group; +}; + +struct group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group; + struct __kernel_sockaddr_storage gsr_source; +}; + +struct group_filter { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group; + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist[1]; +}; + + + + + + + +struct in_pktinfo { + int ipi_ifindex; + struct in_addr ipi_spec_dst; + struct in_addr ipi_addr; +}; + + + + + +struct sockaddr_in { + __kernel_sa_family_t sin_family; + __be16 sin_port; + struct in_addr sin_addr; + + + unsigned char __pad[16 - sizeof(short int) - + sizeof(unsigned short int) - sizeof(struct in_addr)]; +}; +# 20 "./include/linux/in.h" 2 + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int proto_ports_offset(int proto) +{ + switch (proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + case IPPROTO_DCCP: + case IPPROTO_ESP: + case IPPROTO_SCTP: + case IPPROTO_UDPLITE: + return 0; + case IPPROTO_AH: + return 4; + default: + return -22; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_loopback(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xff000000))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0x7f000000)))); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_multicast(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xf0000000))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0xe0000000)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_local_multicast(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xffffff00))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0xe0000000)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_lbcast(__be32 addr) +{ + + return addr == (( __be32)(__u32)__builtin_bswap32((__u32)((((unsigned long int) 0xffffffff))))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_all_snoopers(__be32 addr) +{ + return addr == (( __be32)(__u32)__builtin_bswap32((__u32)((0xe000006aU)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_zeronet(__be32 addr) +{ + return (addr == 0); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_private_10(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xff000000))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0x0a000000)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_private_172(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xfff00000))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0xac100000)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_private_192(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xffff0000))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0xc0a80000)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_linklocal_169(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xffff0000))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0xa9fe0000)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_anycast_6to4(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xffffff00))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0xc0586300)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_test_192(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xffffff00))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0xc0000200)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ipv4_is_test_198(__be32 addr) +{ + return (addr & (( __be32)(__u32)__builtin_bswap32((__u32)((0xfffe0000))))) == (( __be32)(__u32)__builtin_bswap32((__u32)((0xc6120000)))); +} +# 8 "./include/uapi/linux/netfilter.h" 2 +# 42 "./include/uapi/linux/netfilter.h" +enum nf_inet_hooks { + NF_INET_PRE_ROUTING, + NF_INET_LOCAL_IN, + NF_INET_FORWARD, + NF_INET_LOCAL_OUT, + NF_INET_POST_ROUTING, + 
NF_INET_NUMHOOKS +}; + +enum nf_dev_hooks { + NF_NETDEV_INGRESS, + NF_NETDEV_NUMHOOKS +}; + +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_INET = 1, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_NETDEV = 5, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_DECNET = 12, + NFPROTO_NUMPROTO, +}; + +union nf_inet_addr { + __u32 all[4]; + __be32 ip; + __be32 ip6[4]; + struct in_addr in; + struct in6_addr in6; +}; +# 6 "./include/linux/netfilter_defs.h" 2 +# 6 "./include/net/netns/netfilter.h" 2 + +struct proc_dir_entry; +struct nf_logger; +struct nf_queue_handler; + +struct netns_nf { + + struct proc_dir_entry *proc_netfilter; + + const struct nf_queue_handler *queue_handler; + const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO]; + + struct ctl_table_header *nf_log_dir_header; + + struct nf_hook_entries *hooks_ipv4[NF_INET_NUMHOOKS]; + struct nf_hook_entries *hooks_ipv6[NF_INET_NUMHOOKS]; + + struct nf_hook_entries *hooks_arp[3]; + + + struct nf_hook_entries *hooks_bridge[NF_INET_NUMHOOKS]; + + + struct nf_hook_entries *hooks_decnet[7]; + + + bool defrag_ipv4; + + + bool defrag_ipv6; + +}; +# 27 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/x_tables.h" 1 + + + + + + + +struct ebt_table; + +struct netns_xt { + struct list_head tables[NFPROTO_NUMPROTO]; + bool notrack_deprecated_warning; + bool clusterip_deprecated_warning; + + + struct ebt_table *broute_table; + struct ebt_table *frame_filter; + struct ebt_table *frame_nat; + +}; +# 28 "./include/net/net_namespace.h" 2 + +# 1 "./include/net/netns/conntrack.h" 1 +# 9 "./include/net/netns/conntrack.h" +# 1 "./include/linux/netfilter/nf_conntrack_tcp.h" 1 + + + + +# 1 "./include/uapi/linux/netfilter/nf_conntrack_tcp.h" 1 +# 9 "./include/uapi/linux/netfilter/nf_conntrack_tcp.h" +enum tcp_conntrack { + TCP_CONNTRACK_NONE, + TCP_CONNTRACK_SYN_SENT, + TCP_CONNTRACK_SYN_RECV, + TCP_CONNTRACK_ESTABLISHED, + TCP_CONNTRACK_FIN_WAIT, + TCP_CONNTRACK_CLOSE_WAIT, + TCP_CONNTRACK_LAST_ACK, + TCP_CONNTRACK_TIME_WAIT, + TCP_CONNTRACK_CLOSE, + TCP_CONNTRACK_LISTEN, + + TCP_CONNTRACK_MAX, + TCP_CONNTRACK_IGNORE, + TCP_CONNTRACK_RETRANS, + TCP_CONNTRACK_UNACK, + TCP_CONNTRACK_TIMEOUT_MAX +}; +# 52 "./include/uapi/linux/netfilter/nf_conntrack_tcp.h" +struct nf_ct_tcp_flags { + __u8 flags; + __u8 mask; +}; +# 6 "./include/linux/netfilter/nf_conntrack_tcp.h" 2 + + +struct ip_ct_tcp_state { + u_int32_t td_end; + u_int32_t td_maxend; + u_int32_t td_maxwin; + u_int32_t td_maxack; + u_int8_t td_scale; + u_int8_t flags; +}; + +struct ip_ct_tcp { + struct ip_ct_tcp_state seen[2]; + u_int8_t state; + + u_int8_t last_dir; + u_int8_t retrans; + u_int8_t last_index; + u_int32_t last_seq; + u_int32_t last_ack; + u_int32_t last_end; + u_int16_t last_win; + + u_int8_t last_wscale; + u_int8_t last_flags; +}; +# 10 "./include/net/netns/conntrack.h" 2 + +# 1 "./include/linux/netfilter/nf_conntrack_dccp.h" 1 + + + + + +enum ct_dccp_states { + CT_DCCP_NONE, + CT_DCCP_REQUEST, + CT_DCCP_RESPOND, + CT_DCCP_PARTOPEN, + CT_DCCP_OPEN, + CT_DCCP_CLOSEREQ, + CT_DCCP_CLOSING, + CT_DCCP_TIMEWAIT, + CT_DCCP_IGNORE, + CT_DCCP_INVALID, + __CT_DCCP_MAX +}; + + +enum ct_dccp_roles { + CT_DCCP_ROLE_CLIENT, + CT_DCCP_ROLE_SERVER, + __CT_DCCP_ROLE_MAX +}; + + +# 1 "./include/uapi/linux/netfilter/nf_conntrack_tuple_common.h" 1 +# 11 "./include/uapi/linux/netfilter/nf_conntrack_tuple_common.h" +enum ip_conntrack_dir { + IP_CT_DIR_ORIGINAL, + IP_CT_DIR_REPLY, + IP_CT_DIR_MAX +}; + + + + +union nf_conntrack_man_proto { + + __be16 all; + + struct { + __be16 port; + } tcp; + struct { + __be16 
port; + } udp; + struct { + __be16 id; + } icmp; + struct { + __be16 port; + } dccp; + struct { + __be16 port; + } sctp; + struct { + __be16 key; + } gre; +}; +# 29 "./include/linux/netfilter/nf_conntrack_dccp.h" 2 + +struct nf_ct_dccp { + u_int8_t role[IP_CT_DIR_MAX]; + u_int8_t state; + u_int8_t last_pkt; + u_int8_t last_dir; + u_int64_t handshake_seq; +}; +# 12 "./include/net/netns/conntrack.h" 2 + + +# 1 "./include/linux/netfilter/nf_conntrack_sctp.h" 1 + + + + + +# 1 "./include/uapi/linux/netfilter/nf_conntrack_sctp.h" 1 + + + + + + + +enum sctp_conntrack { + SCTP_CONNTRACK_NONE, + SCTP_CONNTRACK_CLOSED, + SCTP_CONNTRACK_COOKIE_WAIT, + SCTP_CONNTRACK_COOKIE_ECHOED, + SCTP_CONNTRACK_ESTABLISHED, + SCTP_CONNTRACK_SHUTDOWN_SENT, + SCTP_CONNTRACK_SHUTDOWN_RECD, + SCTP_CONNTRACK_SHUTDOWN_ACK_SENT, + SCTP_CONNTRACK_HEARTBEAT_SENT, + SCTP_CONNTRACK_HEARTBEAT_ACKED, + SCTP_CONNTRACK_MAX +}; +# 7 "./include/linux/netfilter/nf_conntrack_sctp.h" 2 + +struct ip_ct_sctp { + enum sctp_conntrack state; + + __be32 vtag[IP_CT_DIR_MAX]; +}; +# 15 "./include/net/netns/conntrack.h" 2 + + + +struct ctl_table_header; +struct nf_conntrack_ecache; + +struct nf_generic_net { + unsigned int timeout; +}; + +struct nf_tcp_net { + unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX]; + int tcp_loose; + int tcp_be_liberal; + int tcp_max_retrans; +}; + +enum udp_conntrack { + UDP_CT_UNREPLIED, + UDP_CT_REPLIED, + UDP_CT_MAX +}; + +struct nf_udp_net { + unsigned int timeouts[UDP_CT_MAX]; +}; + +struct nf_icmp_net { + unsigned int timeout; +}; + + +struct nf_dccp_net { + int dccp_loose; + unsigned int dccp_timeout[(__CT_DCCP_MAX - 1) + 1]; +}; + + + +struct nf_sctp_net { + unsigned int timeouts[SCTP_CONNTRACK_MAX]; +}; + + + +enum gre_conntrack { + GRE_CT_UNREPLIED, + GRE_CT_REPLIED, + GRE_CT_MAX +}; + +struct nf_gre_net { + struct list_head keymap_list; + unsigned int timeouts[GRE_CT_MAX]; +}; + + +struct nf_ip_net { + struct nf_generic_net generic; + struct nf_tcp_net tcp; + struct nf_udp_net udp; + struct nf_icmp_net icmp; + struct nf_icmp_net icmpv6; + + struct nf_dccp_net dccp; + + + struct nf_sctp_net sctp; + + + struct nf_gre_net gre; + +}; + +struct ct_pcpu { + spinlock_t lock; + struct hlist_nulls_head unconfirmed; + struct hlist_nulls_head dying; +}; + +struct netns_ct { + atomic_t count; + unsigned int expect_count; + + struct delayed_work ecache_dwork; + bool ecache_dwork_pending; + + bool auto_assign_helper_warned; + + struct ctl_table_header *sysctl_header; + + unsigned int sysctl_log_invalid; + int sysctl_events; + int sysctl_acct; + int sysctl_auto_assign_helper; + int sysctl_tstamp; + int sysctl_checksum; + + struct ct_pcpu *pcpu_lists; + struct ip_conntrack_stat *stat; + struct nf_ct_event_notifier *nf_conntrack_event_cb; + struct nf_exp_event_notifier *nf_expect_event_cb; + struct nf_ip_net nf_ct_proto; + + unsigned int labels_used; + +}; +# 30 "./include/net/net_namespace.h" 2 + +# 1 "./include/net/netns/nftables.h" 1 + + + + + + +struct netns_nftables { + struct list_head tables; + struct list_head commit_list; + struct list_head module_list; + struct mutex commit_mutex; + unsigned int base_seq; + u8 gencursor; + u8 validate_state; +}; +# 32 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/xfrm.h" 1 +# 9 "./include/net/netns/xfrm.h" +# 1 "./include/uapi/linux/xfrm.h" 1 +# 15 "./include/uapi/linux/xfrm.h" +typedef union { + __be32 a4; + __be32 a6[4]; + struct in6_addr in6; +} xfrm_address_t; + + + + + +struct xfrm_id { + xfrm_address_t daddr; + __be32 spi; + __u8 proto; +}; + +struct 
xfrm_sec_ctx { + __u8 ctx_doi; + __u8 ctx_alg; + __u16 ctx_len; + __u32 ctx_sid; + char ctx_str[0]; +}; +# 49 "./include/uapi/linux/xfrm.h" +struct xfrm_selector { + xfrm_address_t daddr; + xfrm_address_t saddr; + __be16 dport; + __be16 dport_mask; + __be16 sport; + __be16 sport_mask; + __u16 family; + __u8 prefixlen_d; + __u8 prefixlen_s; + __u8 proto; + int ifindex; + __kernel_uid32_t user; +}; + + + +struct xfrm_lifetime_cfg { + __u64 soft_byte_limit; + __u64 hard_byte_limit; + __u64 soft_packet_limit; + __u64 hard_packet_limit; + __u64 soft_add_expires_seconds; + __u64 hard_add_expires_seconds; + __u64 soft_use_expires_seconds; + __u64 hard_use_expires_seconds; +}; + +struct xfrm_lifetime_cur { + __u64 bytes; + __u64 packets; + __u64 add_time; + __u64 use_time; +}; + +struct xfrm_replay_state { + __u32 oseq; + __u32 seq; + __u32 bitmap; +}; + + + +struct xfrm_replay_state_esn { + unsigned int bmp_len; + __u32 oseq; + __u32 seq; + __u32 oseq_hi; + __u32 seq_hi; + __u32 replay_window; + __u32 bmp[0]; +}; + +struct xfrm_algo { + char alg_name[64]; + unsigned int alg_key_len; + char alg_key[0]; +}; + +struct xfrm_algo_auth { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_trunc_len; + char alg_key[0]; +}; + +struct xfrm_algo_aead { + char alg_name[64]; + unsigned int alg_key_len; + unsigned int alg_icv_len; + char alg_key[0]; +}; + +struct xfrm_stats { + __u32 replay_window; + __u32 replay; + __u32 integrity_failed; +}; + +enum { + XFRM_POLICY_TYPE_MAIN = 0, + XFRM_POLICY_TYPE_SUB = 1, + XFRM_POLICY_TYPE_MAX = 2, + XFRM_POLICY_TYPE_ANY = 255 +}; + +enum { + XFRM_POLICY_IN = 0, + XFRM_POLICY_OUT = 1, + XFRM_POLICY_FWD = 2, + XFRM_POLICY_MASK = 3, + XFRM_POLICY_MAX = 3 +}; + +enum { + XFRM_SHARE_ANY, + XFRM_SHARE_SESSION, + XFRM_SHARE_USER, + XFRM_SHARE_UNIQUE +}; +# 158 "./include/uapi/linux/xfrm.h" +enum { + XFRM_MSG_BASE = 0x10, + + XFRM_MSG_NEWSA = 0x10, + + XFRM_MSG_DELSA, + + XFRM_MSG_GETSA, + + + XFRM_MSG_NEWPOLICY, + + XFRM_MSG_DELPOLICY, + + XFRM_MSG_GETPOLICY, + + + XFRM_MSG_ALLOCSPI, + + XFRM_MSG_ACQUIRE, + + XFRM_MSG_EXPIRE, + + + XFRM_MSG_UPDPOLICY, + + XFRM_MSG_UPDSA, + + + XFRM_MSG_POLEXPIRE, + + + XFRM_MSG_FLUSHSA, + + XFRM_MSG_FLUSHPOLICY, + + + XFRM_MSG_NEWAE, + + XFRM_MSG_GETAE, + + + XFRM_MSG_REPORT, + + + XFRM_MSG_MIGRATE, + + + XFRM_MSG_NEWSADINFO, + + XFRM_MSG_GETSADINFO, + + + XFRM_MSG_NEWSPDINFO, + + XFRM_MSG_GETSPDINFO, + + + XFRM_MSG_MAPPING, + + __XFRM_MSG_MAX +}; +# 228 "./include/uapi/linux/xfrm.h" +struct xfrm_user_sec_ctx { + __u16 len; + __u16 exttype; + __u8 ctx_alg; + __u8 ctx_doi; + __u16 ctx_len; +}; + +struct xfrm_user_tmpl { + struct xfrm_id id; + __u16 family; + xfrm_address_t saddr; + __u32 reqid; + __u8 mode; + __u8 share; + __u8 optional; + __u32 aalgos; + __u32 ealgos; + __u32 calgos; +}; + +struct xfrm_encap_tmpl { + __u16 encap_type; + __be16 encap_sport; + __be16 encap_dport; + xfrm_address_t encap_oa; +}; + + +enum xfrm_ae_ftype_t { + XFRM_AE_UNSPEC, + XFRM_AE_RTHR=1, + XFRM_AE_RVAL=2, + XFRM_AE_LVAL=4, + XFRM_AE_ETHR=8, + XFRM_AE_CR=16, + XFRM_AE_CE=32, + XFRM_AE_CU=64, + __XFRM_AE_MAX + + +}; + +struct xfrm_userpolicy_type { + __u8 type; + __u16 reserved1; + __u8 reserved2; +}; + + +enum xfrm_attr_type_t { + XFRMA_UNSPEC, + XFRMA_ALG_AUTH, + XFRMA_ALG_CRYPT, + XFRMA_ALG_COMP, + XFRMA_ENCAP, + XFRMA_TMPL, + XFRMA_SA, + XFRMA_POLICY, + XFRMA_SEC_CTX, + XFRMA_LTIME_VAL, + XFRMA_REPLAY_VAL, + XFRMA_REPLAY_THRESH, + XFRMA_ETIMER_THRESH, + XFRMA_SRCADDR, + XFRMA_COADDR, + XFRMA_LASTUSED, + XFRMA_POLICY_TYPE, + XFRMA_MIGRATE, + 
XFRMA_ALG_AEAD, + XFRMA_KMADDRESS, + XFRMA_ALG_AUTH_TRUNC, + XFRMA_MARK, + XFRMA_TFCPAD, + XFRMA_REPLAY_ESN_VAL, + XFRMA_SA_EXTRA_FLAGS, + XFRMA_PROTO, + XFRMA_ADDRESS_FILTER, + XFRMA_PAD, + XFRMA_OFFLOAD_DEV, + XFRMA_SET_MARK, + XFRMA_SET_MARK_MASK, + XFRMA_IF_ID, + __XFRMA_MAX + + + +}; + +struct xfrm_mark { + __u32 v; + __u32 m; +}; + +enum xfrm_sadattr_type_t { + XFRMA_SAD_UNSPEC, + XFRMA_SAD_CNT, + XFRMA_SAD_HINFO, + __XFRMA_SAD_MAX + + +}; + +struct xfrmu_sadhinfo { + __u32 sadhcnt; + __u32 sadhmcnt; +}; + +enum xfrm_spdattr_type_t { + XFRMA_SPD_UNSPEC, + XFRMA_SPD_INFO, + XFRMA_SPD_HINFO, + XFRMA_SPD_IPV4_HTHRESH, + XFRMA_SPD_IPV6_HTHRESH, + __XFRMA_SPD_MAX + + +}; + +struct xfrmu_spdinfo { + __u32 incnt; + __u32 outcnt; + __u32 fwdcnt; + __u32 inscnt; + __u32 outscnt; + __u32 fwdscnt; +}; + +struct xfrmu_spdhinfo { + __u32 spdhcnt; + __u32 spdhmcnt; +}; + +struct xfrmu_spdhthresh { + __u8 lbits; + __u8 rbits; +}; + +struct xfrm_usersa_info { + struct xfrm_selector sel; + struct xfrm_id id; + xfrm_address_t saddr; + struct xfrm_lifetime_cfg lft; + struct xfrm_lifetime_cur curlft; + struct xfrm_stats stats; + __u32 seq; + __u32 reqid; + __u16 family; + __u8 mode; + __u8 replay_window; + __u8 flags; +# 387 "./include/uapi/linux/xfrm.h" +}; + + + +struct xfrm_usersa_id { + xfrm_address_t daddr; + __be32 spi; + __u16 family; + __u8 proto; +}; + +struct xfrm_aevent_id { + struct xfrm_usersa_id sa_id; + xfrm_address_t saddr; + __u32 flags; + __u32 reqid; +}; + +struct xfrm_userspi_info { + struct xfrm_usersa_info info; + __u32 min; + __u32 max; +}; + +struct xfrm_userpolicy_info { + struct xfrm_selector sel; + struct xfrm_lifetime_cfg lft; + struct xfrm_lifetime_cur curlft; + __u32 priority; + __u32 index; + __u8 dir; + __u8 action; + + + __u8 flags; + + + + __u8 share; +}; + +struct xfrm_userpolicy_id { + struct xfrm_selector sel; + __u32 index; + __u8 dir; +}; + +struct xfrm_user_acquire { + struct xfrm_id id; + xfrm_address_t saddr; + struct xfrm_selector sel; + struct xfrm_userpolicy_info policy; + __u32 aalgos; + __u32 ealgos; + __u32 calgos; + __u32 seq; +}; + +struct xfrm_user_expire { + struct xfrm_usersa_info state; + __u8 hard; +}; + +struct xfrm_user_polexpire { + struct xfrm_userpolicy_info pol; + __u8 hard; +}; + +struct xfrm_usersa_flush { + __u8 proto; +}; + +struct xfrm_user_report { + __u8 proto; + struct xfrm_selector sel; +}; + + + +struct xfrm_user_kmaddress { + xfrm_address_t local; + xfrm_address_t remote; + __u32 reserved; + __u16 family; +}; + +struct xfrm_user_migrate { + xfrm_address_t old_daddr; + xfrm_address_t old_saddr; + xfrm_address_t new_daddr; + xfrm_address_t new_saddr; + __u8 proto; + __u8 mode; + __u16 reserved; + __u32 reqid; + __u16 old_family; + __u16 new_family; +}; + +struct xfrm_user_mapping { + struct xfrm_usersa_id id; + __u32 reqid; + xfrm_address_t old_saddr; + xfrm_address_t new_saddr; + __be16 old_sport; + __be16 new_sport; +}; + +struct xfrm_address_filter { + xfrm_address_t saddr; + xfrm_address_t daddr; + __u16 family; + __u8 splen; + __u8 dplen; +}; + +struct xfrm_user_offload { + int ifindex; + __u8 flags; +}; +# 519 "./include/uapi/linux/xfrm.h" +enum xfrm_nlgroups { + XFRMNLGRP_NONE, + + XFRMNLGRP_ACQUIRE, + + XFRMNLGRP_EXPIRE, + + XFRMNLGRP_SA, + + XFRMNLGRP_POLICY, + + XFRMNLGRP_AEVENTS, + + XFRMNLGRP_REPORT, + + XFRMNLGRP_MIGRATE, + + XFRMNLGRP_MAPPING, + + __XFRMNLGRP_MAX +}; +# 10 "./include/net/netns/xfrm.h" 2 + + +struct ctl_table_header; + +struct xfrm_policy_hash { + struct hlist_head *table; + unsigned int hmask; + u8 
dbits4; + u8 sbits4; + u8 dbits6; + u8 sbits6; +}; + +struct xfrm_policy_hthresh { + struct work_struct work; + seqlock_t lock; + u8 lbits4; + u8 rbits4; + u8 lbits6; + u8 rbits6; +}; + +struct netns_xfrm { + struct list_head state_all; +# 42 "./include/net/netns/xfrm.h" + struct hlist_head *state_bydst; + struct hlist_head *state_bysrc; + struct hlist_head *state_byspi; + unsigned int state_hmask; + unsigned int state_num; + struct work_struct state_hash_work; + + struct list_head policy_all; + struct hlist_head *policy_byidx; + unsigned int policy_idx_hmask; + struct hlist_head policy_inexact[XFRM_POLICY_MAX]; + struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX]; + unsigned int policy_count[XFRM_POLICY_MAX * 2]; + struct work_struct policy_hash_work; + struct xfrm_policy_hthresh policy_hthresh; + struct list_head inexact_bins; + + + struct sock *nlsk; + struct sock *nlsk_stash; + + u32 sysctl_aevent_etime; + u32 sysctl_aevent_rseqth; + int sysctl_larval_drop; + u32 sysctl_acq_expires; + + struct ctl_table_header *sysctl_hdr; + + + struct dst_ops xfrm4_dst_ops; + + struct dst_ops xfrm6_dst_ops; + + spinlock_t xfrm_state_lock; + spinlock_t xfrm_policy_lock; + struct mutex xfrm_cfg_mutex; +}; +# 33 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/mpls.h" 1 +# 9 "./include/net/netns/mpls.h" +struct mpls_route; +struct ctl_table_header; + +struct netns_mpls { + int ip_ttl_propagate; + int default_ttl; + size_t platform_labels; + struct mpls_route * *platform_label; + + struct ctl_table_header *ctl; +}; +# 34 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/can.h" 1 +# 11 "./include/net/netns/can.h" +struct can_dev_rcv_lists; +struct can_pkg_stats; +struct can_rcv_lists_stats; + +struct netns_can { + + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *pde_version; + struct proc_dir_entry *pde_stats; + struct proc_dir_entry *pde_reset_stats; + struct proc_dir_entry *pde_rcvlist_all; + struct proc_dir_entry *pde_rcvlist_fil; + struct proc_dir_entry *pde_rcvlist_inv; + struct proc_dir_entry *pde_rcvlist_sff; + struct proc_dir_entry *pde_rcvlist_eff; + struct proc_dir_entry *pde_rcvlist_err; + struct proc_dir_entry *bcmproc_dir; + + + + struct can_dev_rcv_lists *rx_alldev_list; + spinlock_t rcvlists_lock; + struct timer_list stattimer; + struct can_pkg_stats *pkg_stats; + struct can_rcv_lists_stats *rcv_lists_stats; + + + struct hlist_head cgw_list; +}; +# 35 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/xdp.h" 1 + + + + + + + +struct netns_xdp { + struct mutex lock; + struct hlist_head list; +}; +# 36 "./include/net/net_namespace.h" 2 +# 1 "./include/net/netns/bpf.h" 1 +# 9 "./include/net/netns/bpf.h" +# 1 "./include/linux/bpf-netns.h" 1 + + + + + + + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + MAX_NETNS_BPF_ATTACH_TYPE +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) enum netns_bpf_attach_type +to_netns_bpf_attach_type(enum bpf_attach_type attach_type) +{ + switch (attach_type) { + case BPF_FLOW_DISSECTOR: + return NETNS_BPF_FLOW_DISSECTOR; + default: + return NETNS_BPF_INVALID; + } +} + + +extern struct mutex netns_bpf_mutex; + +union bpf_attr; +struct bpf_prog; + + +int netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr *uattr); +int netns_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog); +int netns_bpf_prog_detach(const union bpf_attr *attr); +int netns_bpf_link_create(const union bpf_attr *attr, 
+ struct bpf_prog *prog); +# 10 "./include/net/netns/bpf.h" 2 + +struct bpf_prog; + +struct netns_bpf { + struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE]; + struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE]; +}; +# 37 "./include/net/net_namespace.h" 2 + + + + + +struct user_namespace; +struct proc_dir_entry; +struct net_device; +struct sock; +struct ctl_table_header; +struct net_generic; +struct uevent_sock; +struct netns_ipvs; +struct bpf_prog; + + + + + +struct net { + + + + refcount_t passive; + + + refcount_t count; + + + spinlock_t rules_mod_lock; + + unsigned int dev_unreg_count; + + unsigned int dev_base_seq; + int ifindex; + + spinlock_t nsid_lock; + atomic_t fnhe_genid; + + struct list_head list; + struct list_head exit_list; + + + + + + struct llist_node cleanup_list; + + + struct key_tag *key_domain; + + struct user_namespace *user_ns; + struct ucounts *ucounts; + struct idr netns_ids; + + struct ns_common ns; + + struct list_head dev_base_head; + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + + + struct ctl_table_set sysctls; + + + struct sock *rtnl; + struct sock *genl_sock; + + struct uevent_sock *uevent_sock; + + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; + struct raw_notifier_head netdev_chain; + + + + + u32 hash_mix; + + struct net_device *loopback_dev; + + + struct list_head rules_ops; + + struct netns_core core; + struct netns_mib mib; + struct netns_packet packet; + struct netns_unix unx; + struct netns_nexthop nexthop; + struct netns_ipv4 ipv4; + + struct netns_ipv6 ipv6; + + + struct netns_ieee802154_lowpan ieee802154_lowpan; + + + struct netns_sctp sctp; + + + struct netns_dccp dccp; + + + struct netns_nf nf; + struct netns_xt xt; + + struct netns_ct ct; + + + struct netns_nftables nft; + + + struct netns_nf_frag nf_frag; + struct ctl_table_header *nf_frag_frags_hdr; + + struct sock *nfnl; + struct sock *nfnl_stash; + + struct list_head nfnl_acct_list; + + + struct list_head nfct_timeout_list; + + + + struct sk_buff_head wext_nlevents; + + struct net_generic *gen; + + + struct netns_bpf bpf; + + + + struct netns_xfrm xfrm; + + + atomic64_t net_cookie; + + + struct netns_ipvs *ipvs; + + + struct netns_mpls mpls; + + + struct netns_can can; + + + struct netns_xdp xdp; + + + struct sock *crypto_nlsk; + + struct sock *diag_nlsk; +} __attribute__((__designated_init__)); + +# 1 "./include/linux/seq_file_net.h" 1 + + + + + + +struct net; +extern struct net init_net; + +struct seq_net_private { + + struct net *net; + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net *seq_file_net(struct seq_file *seq) +{ + + return ((struct seq_net_private *)seq->private)->net; + + + +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net *seq_file_single_net(struct seq_file *seq) +{ + + return (struct net *)seq->private; + + + +} +# 195 "./include/net/net_namespace.h" 2 + + +extern struct net init_net; + + +struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, + struct net *old_net); + +void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid); + +void net_ns_barrier(void); +# 228 "./include/net/net_namespace.h" +extern struct list_head net_namespace_list; + +struct net *get_net_ns_by_pid(pid_t pid); +struct net *get_net_ns_by_fd(int fd); + +u64 net_gen_cookie(struct net *net); + + +void ipx_register_sysctl(void); +void 
ipx_unregister_sysctl(void); + + + + + + +void __put_net(struct net *net); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net *get_net(struct net *net) +{ + refcount_inc(&net->count); + return net; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net *maybe_get_net(struct net *net) +{ + + + + + + if (!refcount_inc_not_zero(&net->count)) + net = ((void *)0); + return net; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_net(struct net *net) +{ + if (refcount_dec_and_test(&net->count)) + __put_net(net); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +int net_eq(const struct net *net1, const struct net *net2) +{ + return net1 == net2; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int check_net(const struct net *net) +{ + return refcount_read(&net->count) != 0; +} + +void net_drop_ns(void *); +# 314 "./include/net/net_namespace.h" +typedef struct { + + struct net *net; + +} possible_net_t; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void write_pnet(possible_net_t *pnet, struct net *net) +{ + + pnet->net = net; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net *read_pnet(const possible_net_t *pnet) +{ + + return pnet->net; + + + +} +# 356 "./include/net/net_namespace.h" +int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp); +int peernet2id(const struct net *net, struct net *peer); +bool peernet_has_id(const struct net *net, struct net *peer); +struct net *get_net_ns_by_id(const struct net *net, int id); + +struct pernet_operations { + struct list_head list; +# 385 "./include/net/net_namespace.h" + int (*init)(struct net *net); + void (*pre_exit)(struct net *net); + void (*exit)(struct net *net); + void (*exit_batch)(struct list_head *net_exit_list); + unsigned int *id; + size_t size; +}; +# 412 "./include/net/net_namespace.h" +int register_pernet_subsys(struct pernet_operations *); +void unregister_pernet_subsys(struct pernet_operations *); +int register_pernet_device(struct pernet_operations *); +void unregister_pernet_device(struct pernet_operations *); + +struct ctl_table; +struct ctl_table_header; + + +int net_sysctl_init(void); +struct ctl_table_header *register_net_sysctl(struct net *net, const char *path, + struct ctl_table *table); +void unregister_net_sysctl_table(struct ctl_table_header *header); +# 437 "./include/net/net_namespace.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rt_genid_ipv4(const struct net *net) +{ + return atomic_read(&net->ipv4.rt_genid); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rt_genid_ipv6(const struct net *net) +{ + return atomic_read(&net->ipv6.fib6_sernum); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rt_genid_bump_ipv4(struct net *net) +{ + atomic_inc(&net->ipv4.rt_genid); +} + +extern void (*__fib6_flush_trees)(struct net *net); +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void rt_genid_bump_ipv6(struct net *net) +{ + if (__fib6_flush_trees) + __fib6_flush_trees(net); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct netns_ieee802154_lowpan * +net_ieee802154_lowpan(struct net *net) +{ + return &net->ieee802154_lowpan; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rt_genid_bump_all(struct net *net) +{ + rt_genid_bump_ipv4(net); + rt_genid_bump_ipv6(net); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fnhe_genid(const struct net *net) +{ + return atomic_read(&net->fnhe_genid); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fnhe_genid_bump(struct net *net) +{ + atomic_inc(&net->fnhe_genid); +} +# 39 "./include/linux/netdevice.h" 2 + +# 1 "./include/net/dcbnl.h" 1 +# 11 "./include/net/dcbnl.h" +# 1 "./include/uapi/linux/dcbnl.h" 1 +# 58 "./include/uapi/linux/dcbnl.h" +struct ieee_ets { + __u8 willing; + __u8 ets_cap; + __u8 cbs; + __u8 tc_tx_bw[8]; + __u8 tc_rx_bw[8]; + __u8 tc_tsa[8]; + __u8 prio_tc[8]; + __u8 tc_reco_bw[8]; + __u8 tc_reco_tsa[8]; + __u8 reco_prio_tc[8]; +}; +# 78 "./include/uapi/linux/dcbnl.h" +struct ieee_maxrate { + __u64 tc_maxrate[8]; +}; + +enum dcbnl_cndd_states { + DCB_CNDD_RESET = 0, + DCB_CNDD_EDGE, + DCB_CNDD_INTERIOR, + DCB_CNDD_INTERIOR_READY, +}; +# 119 "./include/uapi/linux/dcbnl.h" +struct ieee_qcn { + __u8 rpg_enable[8]; + __u32 rppp_max_rps[8]; + __u32 rpg_time_reset[8]; + __u32 rpg_byte_reset[8]; + __u32 rpg_threshold[8]; + __u32 rpg_max_rate[8]; + __u32 rpg_ai_rate[8]; + __u32 rpg_hai_rate[8]; + __u32 rpg_gd[8]; + __u32 rpg_min_dec_fac[8]; + __u32 rpg_min_rate[8]; + __u32 cndd_state_machine[8]; +}; +# 141 "./include/uapi/linux/dcbnl.h" +struct ieee_qcn_stats { + __u64 rppp_rp_centiseconds[8]; + __u32 rppp_created_rps[8]; +}; +# 157 "./include/uapi/linux/dcbnl.h" +struct ieee_pfc { + __u8 pfc_cap; + __u8 pfc_en; + __u8 mbc; + __u16 delay; + __u64 requests[8]; + __u64 indications[8]; +}; + + + +struct dcbnl_buffer { + + __u8 prio2buffer[8]; + + __u32 buffer_size[8]; + __u32 total_size; +}; +# 190 "./include/uapi/linux/dcbnl.h" +struct cee_pg { + __u8 willing; + __u8 error; + __u8 pg_en; + __u8 tcs_supported; + __u8 pg_bw[8]; + __u8 prio_pg[8]; +}; +# 207 "./include/uapi/linux/dcbnl.h" +struct cee_pfc { + __u8 willing; + __u8 error; + __u8 pfc_en; + __u8 tcs_supported; +}; +# 244 "./include/uapi/linux/dcbnl.h" +struct dcb_app { + __u8 selector; + __u8 priority; + __u16 protocol; +}; +# 259 "./include/uapi/linux/dcbnl.h" +struct dcb_peer_app_info { + __u8 willing; + __u8 error; +}; + +struct dcbmsg { + __u8 dcb_family; + __u8 cmd; + __u16 dcb_pad; +}; +# 301 "./include/uapi/linux/dcbnl.h" +enum dcbnl_commands { + DCB_CMD_UNDEFINED, + + DCB_CMD_GSTATE, + DCB_CMD_SSTATE, + + DCB_CMD_PGTX_GCFG, + DCB_CMD_PGTX_SCFG, + DCB_CMD_PGRX_GCFG, + DCB_CMD_PGRX_SCFG, + + DCB_CMD_PFC_GCFG, + DCB_CMD_PFC_SCFG, + + DCB_CMD_SET_ALL, + + DCB_CMD_GPERM_HWADDR, + + DCB_CMD_GCAP, + + DCB_CMD_GNUMTCS, + DCB_CMD_SNUMTCS, + + DCB_CMD_PFC_GSTATE, + DCB_CMD_PFC_SSTATE, + + DCB_CMD_BCN_GCFG, + DCB_CMD_BCN_SCFG, + + DCB_CMD_GAPP, + DCB_CMD_SAPP, + + DCB_CMD_IEEE_SET, + DCB_CMD_IEEE_GET, + + DCB_CMD_GDCBX, + DCB_CMD_SDCBX, + + DCB_CMD_GFEATCFG, + DCB_CMD_SFEATCFG, + + 
DCB_CMD_CEE_GET, + DCB_CMD_IEEE_DEL, + + __DCB_CMD_ENUM_MAX, + DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1, +}; +# 369 "./include/uapi/linux/dcbnl.h" +enum dcbnl_attrs { + DCB_ATTR_UNDEFINED, + + DCB_ATTR_IFNAME, + DCB_ATTR_STATE, + DCB_ATTR_PFC_STATE, + DCB_ATTR_PFC_CFG, + DCB_ATTR_NUM_TC, + DCB_ATTR_PG_CFG, + DCB_ATTR_SET_ALL, + DCB_ATTR_PERM_HWADDR, + DCB_ATTR_CAP, + DCB_ATTR_NUMTCS, + DCB_ATTR_BCN, + DCB_ATTR_APP, + + + DCB_ATTR_IEEE, + + DCB_ATTR_DCBX, + DCB_ATTR_FEATCFG, + + + DCB_ATTR_CEE, + + __DCB_ATTR_ENUM_MAX, + DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1, +}; +# 409 "./include/uapi/linux/dcbnl.h" +enum ieee_attrs { + DCB_ATTR_IEEE_UNSPEC, + DCB_ATTR_IEEE_ETS, + DCB_ATTR_IEEE_PFC, + DCB_ATTR_IEEE_APP_TABLE, + DCB_ATTR_IEEE_PEER_ETS, + DCB_ATTR_IEEE_PEER_PFC, + DCB_ATTR_IEEE_PEER_APP, + DCB_ATTR_IEEE_MAXRATE, + DCB_ATTR_IEEE_QCN, + DCB_ATTR_IEEE_QCN_STATS, + DCB_ATTR_DCB_BUFFER, + __DCB_ATTR_IEEE_MAX +}; + + +enum ieee_attrs_app { + DCB_ATTR_IEEE_APP_UNSPEC, + DCB_ATTR_IEEE_APP, + __DCB_ATTR_IEEE_APP_MAX +}; +# 447 "./include/uapi/linux/dcbnl.h" +enum cee_attrs { + DCB_ATTR_CEE_UNSPEC, + DCB_ATTR_CEE_PEER_PG, + DCB_ATTR_CEE_PEER_PFC, + DCB_ATTR_CEE_PEER_APP_TABLE, + DCB_ATTR_CEE_TX_PG, + DCB_ATTR_CEE_RX_PG, + DCB_ATTR_CEE_PFC, + DCB_ATTR_CEE_APP_TABLE, + DCB_ATTR_CEE_FEAT, + __DCB_ATTR_CEE_MAX +}; + + +enum peer_app_attr { + DCB_ATTR_CEE_PEER_APP_UNSPEC, + DCB_ATTR_CEE_PEER_APP_INFO, + DCB_ATTR_CEE_PEER_APP, + __DCB_ATTR_CEE_PEER_APP_MAX +}; + + +enum cee_attrs_app { + DCB_ATTR_CEE_APP_UNSPEC, + DCB_ATTR_CEE_APP, + __DCB_ATTR_CEE_APP_MAX +}; +# 492 "./include/uapi/linux/dcbnl.h" +enum dcbnl_pfc_up_attrs { + DCB_PFC_UP_ATTR_UNDEFINED, + + DCB_PFC_UP_ATTR_0, + DCB_PFC_UP_ATTR_1, + DCB_PFC_UP_ATTR_2, + DCB_PFC_UP_ATTR_3, + DCB_PFC_UP_ATTR_4, + DCB_PFC_UP_ATTR_5, + DCB_PFC_UP_ATTR_6, + DCB_PFC_UP_ATTR_7, + DCB_PFC_UP_ATTR_ALL, + + __DCB_PFC_UP_ATTR_ENUM_MAX, + DCB_PFC_UP_ATTR_MAX = __DCB_PFC_UP_ATTR_ENUM_MAX - 1, +}; +# 535 "./include/uapi/linux/dcbnl.h" +enum dcbnl_pg_attrs { + DCB_PG_ATTR_UNDEFINED, + + DCB_PG_ATTR_TC_0, + DCB_PG_ATTR_TC_1, + DCB_PG_ATTR_TC_2, + DCB_PG_ATTR_TC_3, + DCB_PG_ATTR_TC_4, + DCB_PG_ATTR_TC_5, + DCB_PG_ATTR_TC_6, + DCB_PG_ATTR_TC_7, + DCB_PG_ATTR_TC_MAX, + DCB_PG_ATTR_TC_ALL, + + DCB_PG_ATTR_BW_ID_0, + DCB_PG_ATTR_BW_ID_1, + DCB_PG_ATTR_BW_ID_2, + DCB_PG_ATTR_BW_ID_3, + DCB_PG_ATTR_BW_ID_4, + DCB_PG_ATTR_BW_ID_5, + DCB_PG_ATTR_BW_ID_6, + DCB_PG_ATTR_BW_ID_7, + DCB_PG_ATTR_BW_ID_MAX, + DCB_PG_ATTR_BW_ID_ALL, + + __DCB_PG_ATTR_ENUM_MAX, + DCB_PG_ATTR_MAX = __DCB_PG_ATTR_ENUM_MAX - 1, +}; +# 584 "./include/uapi/linux/dcbnl.h" +enum dcbnl_tc_attrs { + DCB_TC_ATTR_PARAM_UNDEFINED, + + DCB_TC_ATTR_PARAM_PGID, + DCB_TC_ATTR_PARAM_UP_MAPPING, + DCB_TC_ATTR_PARAM_STRICT_PRIO, + DCB_TC_ATTR_PARAM_BW_PCT, + DCB_TC_ATTR_PARAM_ALL, + + __DCB_TC_ATTR_PARAM_ENUM_MAX, + DCB_TC_ATTR_PARAM_MAX = __DCB_TC_ATTR_PARAM_ENUM_MAX - 1, +}; +# 618 "./include/uapi/linux/dcbnl.h" +enum dcbnl_cap_attrs { + DCB_CAP_ATTR_UNDEFINED, + DCB_CAP_ATTR_ALL, + DCB_CAP_ATTR_PG, + DCB_CAP_ATTR_PFC, + DCB_CAP_ATTR_UP2TC, + DCB_CAP_ATTR_PG_TCS, + DCB_CAP_ATTR_PFC_TCS, + DCB_CAP_ATTR_GSP, + DCB_CAP_ATTR_BCN, + DCB_CAP_ATTR_DCBX, + + __DCB_CAP_ATTR_ENUM_MAX, + DCB_CAP_ATTR_MAX = __DCB_CAP_ATTR_ENUM_MAX - 1, +}; +# 676 "./include/uapi/linux/dcbnl.h" +enum dcbnl_numtcs_attrs { + DCB_NUMTCS_ATTR_UNDEFINED, + DCB_NUMTCS_ATTR_ALL, + DCB_NUMTCS_ATTR_PG, + DCB_NUMTCS_ATTR_PFC, + + __DCB_NUMTCS_ATTR_ENUM_MAX, + DCB_NUMTCS_ATTR_MAX = __DCB_NUMTCS_ATTR_ENUM_MAX - 1, +}; + +enum dcbnl_bcn_attrs{ + DCB_BCN_ATTR_UNDEFINED = 
0, + + DCB_BCN_ATTR_RP_0, + DCB_BCN_ATTR_RP_1, + DCB_BCN_ATTR_RP_2, + DCB_BCN_ATTR_RP_3, + DCB_BCN_ATTR_RP_4, + DCB_BCN_ATTR_RP_5, + DCB_BCN_ATTR_RP_6, + DCB_BCN_ATTR_RP_7, + DCB_BCN_ATTR_RP_ALL, + + DCB_BCN_ATTR_BCNA_0, + DCB_BCN_ATTR_BCNA_1, + DCB_BCN_ATTR_ALPHA, + DCB_BCN_ATTR_BETA, + DCB_BCN_ATTR_GD, + DCB_BCN_ATTR_GI, + DCB_BCN_ATTR_TMAX, + DCB_BCN_ATTR_TD, + DCB_BCN_ATTR_RMIN, + DCB_BCN_ATTR_W, + DCB_BCN_ATTR_RD, + DCB_BCN_ATTR_RU, + DCB_BCN_ATTR_WRTT, + DCB_BCN_ATTR_RI, + DCB_BCN_ATTR_C, + DCB_BCN_ATTR_ALL, + + __DCB_BCN_ATTR_ENUM_MAX, + DCB_BCN_ATTR_MAX = __DCB_BCN_ATTR_ENUM_MAX - 1, +}; + + + + + + + +enum dcb_general_attr_values { + DCB_ATTR_VALUE_UNDEFINED = 0xff +}; + + + +enum dcbnl_app_attrs { + DCB_APP_ATTR_UNDEFINED, + + DCB_APP_ATTR_IDTYPE, + DCB_APP_ATTR_ID, + DCB_APP_ATTR_PRIORITY, + + __DCB_APP_ATTR_ENUM_MAX, + DCB_APP_ATTR_MAX = __DCB_APP_ATTR_ENUM_MAX - 1, +}; +# 758 "./include/uapi/linux/dcbnl.h" +enum dcbnl_featcfg_attrs { + DCB_FEATCFG_ATTR_UNDEFINED, + DCB_FEATCFG_ATTR_ALL, + DCB_FEATCFG_ATTR_PG, + DCB_FEATCFG_ATTR_PFC, + DCB_FEATCFG_ATTR_APP, + + __DCB_FEATCFG_ATTR_ENUM_MAX, + DCB_FEATCFG_ATTR_MAX = __DCB_FEATCFG_ATTR_ENUM_MAX - 1, +}; +# 12 "./include/net/dcbnl.h" 2 + +struct dcb_app_type { + int ifindex; + struct dcb_app app; + struct list_head list; + u8 dcbx; +}; + +int dcb_setapp(struct net_device *, struct dcb_app *); +u8 dcb_getapp(struct net_device *, struct dcb_app *); +int dcb_ieee_setapp(struct net_device *, struct dcb_app *); +int dcb_ieee_delapp(struct net_device *, struct dcb_app *); +u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *); + +struct dcb_ieee_app_prio_map { + u64 map[8]; +}; +void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev, + struct dcb_ieee_app_prio_map *p_map); + +struct dcb_ieee_app_dscp_map { + u8 map[64]; +}; +void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev, + struct dcb_ieee_app_dscp_map *p_map); +u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev); + +int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, + u32 seq, u32 pid); +int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, + u32 seq, u32 pid); + + + + + +struct dcbnl_rtnl_ops { + + int (*ieee_getets) (struct net_device *, struct ieee_ets *); + int (*ieee_setets) (struct net_device *, struct ieee_ets *); + int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *); + int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *); + int (*ieee_getqcn) (struct net_device *, struct ieee_qcn *); + int (*ieee_setqcn) (struct net_device *, struct ieee_qcn *); + int (*ieee_getqcnstats) (struct net_device *, struct ieee_qcn_stats *); + int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *); + int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); + int (*ieee_getapp) (struct net_device *, struct dcb_app *); + int (*ieee_setapp) (struct net_device *, struct dcb_app *); + int (*ieee_delapp) (struct net_device *, struct dcb_app *); + int (*ieee_peer_getets) (struct net_device *, struct ieee_ets *); + int (*ieee_peer_getpfc) (struct net_device *, struct ieee_pfc *); + + + u8 (*getstate)(struct net_device *); + u8 (*setstate)(struct net_device *, u8); + void (*getpermhwaddr)(struct net_device *, u8 *); + void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8); + void (*setpgbwgcfgtx)(struct net_device *, int, u8); + void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8); + void (*setpgbwgcfgrx)(struct net_device *, int, u8); + void (*getpgtccfgtx)(struct 
net_device *, int, u8 *, u8 *, u8 *, u8 *); + void (*getpgbwgcfgtx)(struct net_device *, int, u8 *); + void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *); + void (*getpgbwgcfgrx)(struct net_device *, int, u8 *); + void (*setpfccfg)(struct net_device *, int, u8); + void (*getpfccfg)(struct net_device *, int, u8 *); + u8 (*setall)(struct net_device *); + u8 (*getcap)(struct net_device *, int, u8 *); + int (*getnumtcs)(struct net_device *, int, u8 *); + int (*setnumtcs)(struct net_device *, int, u8); + u8 (*getpfcstate)(struct net_device *); + void (*setpfcstate)(struct net_device *, u8); + void (*getbcncfg)(struct net_device *, int, u32 *); + void (*setbcncfg)(struct net_device *, int, u32); + void (*getbcnrp)(struct net_device *, int, u8 *); + void (*setbcnrp)(struct net_device *, int, u8); + int (*setapp)(struct net_device *, u8, u16, u8); + int (*getapp)(struct net_device *, u8, u16); + u8 (*getfeatcfg)(struct net_device *, int, u8 *); + u8 (*setfeatcfg)(struct net_device *, int, u8); + + + u8 (*getdcbx)(struct net_device *); + u8 (*setdcbx)(struct net_device *, u8); + + + int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, + u16 *); + int (*peer_getapptable)(struct net_device *, struct dcb_app *); + + + int (*cee_peer_getpg) (struct net_device *, struct cee_pg *); + int (*cee_peer_getpfc) (struct net_device *, struct cee_pfc *); + + + int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *); + int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *); +}; +# 41 "./include/linux/netdevice.h" 2 + +# 1 "./include/net/netprio_cgroup.h" 1 +# 16 "./include/net/netprio_cgroup.h" +struct netprio_map { + struct callback_head rcu; + u32 priomap_len; + u32 priomap[]; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 task_netprioidx(struct task_struct *p) +{ + struct cgroup_subsys_state *css; + u32 idx; + + rcu_read_lock(); + css = task_css(p, net_prio_cgrp_id); + idx = css->id; + rcu_read_unlock(); + return idx; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_update_netprioidx(struct sock_cgroup_data *skcd) +{ + if (((preempt_count() & ((((1UL << (4))-1) << ((0 + 8) + 8)) | (((1UL << (8))-1) << (0 + 8)) | (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))))) + return; + + sock_cgroup_set_prioidx(skcd, task_netprioidx(get_current())); +} +# 43 "./include/linux/netdevice.h" 2 +# 1 "./include/net/xdp.h" 1 +# 38 "./include/net/xdp.h" +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, + MEM_TYPE_PAGE_ORDER0, + MEM_TYPE_PAGE_POOL, + MEM_TYPE_XSK_BUFF_POOL, + MEM_TYPE_MAX, +}; + + + + + +struct xdp_mem_info { + u32 type; + u32 id; +}; + +struct page_pool; + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; + struct xdp_mem_info mem; +} __attribute__((__aligned__((1 << (6))))); + +struct xdp_txq_info { + struct net_device *dev; +}; + +struct xdp_buff { + void *data; + void *data_end; + void *data_meta; + void *data_hard_start; + struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; +}; +# 88 "./include/net/xdp.h" +struct xdp_frame { + void *data; + u16 len; + u16 headroom; + u32 metasize:8; + u32 frame_sz:24; + + + + struct xdp_mem_info mem; + struct net_device *dev_rx; +}; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void xdp_scrub_frame(struct xdp_frame *frame) +{ + frame->data = 
((void *)0); + frame->dev_rx = ((void *)0); +} + + +void xdp_warn(const char *msg, const char *func, const int line); + + +struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) +{ + xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame); + xdp->data = frame->data; + xdp->data_end = frame->data + frame->len; + xdp->data_meta = frame->data - frame->metasize; + xdp->frame_sz = frame->frame_sz; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) +{ + struct xdp_frame *xdp_frame; + int metasize; + int headroom; + + if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) + return xdp_convert_zc_to_xdp_frame(xdp); + + + headroom = xdp->data - xdp->data_hard_start; + metasize = xdp->data - xdp->data_meta; + metasize = metasize > 0 ? metasize : 0; + if (__builtin_expect(!!((headroom - metasize) < sizeof(*xdp_frame)), 0)) + return ((void *)0); + + + if (__builtin_expect(!!(xdp->data_end > ((xdp)->data_hard_start + (xdp)->frame_sz - ((((sizeof(struct skb_shared_info))) + ((typeof((sizeof(struct skb_shared_info))))(((1 << (6)))) - 1)) & ~((typeof((sizeof(struct skb_shared_info))))(((1 << (6)))) - 1)))), 0)) { + xdp_warn("Driver BUG: missing reserved tailroom", __func__, 144); + return ((void *)0); + } + + + xdp_frame = xdp->data_hard_start; + + xdp_frame->data = xdp->data; + xdp_frame->len = xdp->data_end - xdp->data; + xdp_frame->headroom = headroom - sizeof(*xdp_frame); + xdp_frame->metasize = metasize; + xdp_frame->frame_sz = xdp->frame_sz; + + + xdp_frame->mem = xdp->rxq->mem; + + return xdp_frame; +} + +void xdp_return_frame(struct xdp_frame *xdpf); +void xdp_return_frame_rx_napi(struct xdp_frame *xdpf); +void xdp_return_buff(struct xdp_buff *xdp); + + + + + + +void __xdp_release_frame(void *data, struct xdp_mem_info *mem); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void xdp_release_frame(struct xdp_frame *xdpf) +{ + struct xdp_mem_info *mem = &xdpf->mem; + + + if (mem->type == MEM_TYPE_PAGE_POOL) + __xdp_release_frame(xdpf->data, mem); +} + +int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index); +void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); +void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq); +bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq); +int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, + enum xdp_mem_type type, void *allocator); +void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void +xdp_set_data_meta_invalid(struct xdp_buff *xdp) +{ + xdp->data_meta = xdp->data + 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) bool +xdp_data_meta_unsupported(const struct xdp_buff *xdp) +{ + return __builtin_expect(!!(xdp->data_meta > xdp->data), 0); +} + +struct xdp_attachment_info { + struct bpf_prog *prog; + u32 flags; +}; + +struct netdev_bpf; +int xdp_attachment_query(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); 
+bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); +void xdp_attachment_setup(struct xdp_attachment_info *info, + struct netdev_bpf *bpf); +# 44 "./include/linux/netdevice.h" 2 + + +# 1 "./include/uapi/linux/neighbour.h" 1 + + + + + +# 1 "./include/linux/netlink.h" 1 +# 9 "./include/linux/netlink.h" +# 1 "./include/net/scm.h" 1 +# 18 "./include/net/scm.h" +struct scm_creds { + u32 pid; + kuid_t uid; + kgid_t gid; +}; + +struct scm_fp_list { + short count; + short max; + struct user_struct *user; + struct file *fp[253]; +}; + +struct scm_cookie { + struct pid *pid; + struct scm_fp_list *fp; + struct scm_creds creds; + + u32 secid; + +}; + +void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm); +void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm); +int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm); +void __scm_destroy(struct scm_cookie *scm); +struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm) +{ + security_socket_getpeersec_dgram(sock, ((void *)0), &scm->secid); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void scm_set_cred(struct scm_cookie *scm, + struct pid *pid, kuid_t uid, kgid_t gid) +{ + scm->pid = get_pid(pid); + scm->creds.pid = pid_vnr(pid); + scm->creds.uid = uid; + scm->creds.gid = gid; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void scm_destroy_cred(struct scm_cookie *scm) +{ + put_pid(scm->pid); + scm->pid = ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void scm_destroy(struct scm_cookie *scm) +{ + scm_destroy_cred(scm); + if (scm->fp) + __scm_destroy(scm); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int scm_send(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, bool forcecreds) +{ + memset(scm, 0, sizeof(*scm)); + scm->creds.uid = (kuid_t){ -1 }; + scm->creds.gid = (kgid_t){ -1 }; + if (forcecreds) + scm_set_cred(scm, task_tgid(get_current()), (({ ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("include/net/scm.h", 85, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->uid; })), (({ ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("include/net/scm.h", 85, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->gid; }))); + unix_get_peersec_dgram(sock, scm); + if (msg->msg_controllen <= 0) + return 0; + return __scm_send(sock, msg, scm); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) +{ + char *secdata; + u32 seclen; + int err; + + if (test_bit(4, &sock->flags)) { + err = 
security_secid_to_secctx(scm->secid, &secdata, &seclen); + + if (!err) { + put_cmsg(msg, 1, 0x03, seclen, secdata); + security_release_secctx(secdata, seclen); + } + } +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void scm_recv(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, int flags) +{ + if (!msg->msg_control) { + if (test_bit(3, &sock->flags) || scm->fp) + msg->msg_flags |= 8; + scm_destroy(scm); + return; + } + + if (test_bit(3, &sock->flags)) { + struct user_namespace *current_ns = (({ ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("include/net/scm.h", 124, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->user_ns; })); + struct ucred ucreds = { + .pid = scm->creds.pid, + .uid = from_kuid_munged(current_ns, scm->creds.uid), + .gid = from_kgid_munged(current_ns, scm->creds.gid), + }; + put_cmsg(msg, 1, 0x02, sizeof(ucreds), &ucreds); + } + + scm_destroy_cred(scm); + + scm_passec(sock, msg, scm); + + if (!scm->fp) + return; + + scm_detach_fds(msg, scm); +} +# 10 "./include/linux/netlink.h" 2 +# 1 "./include/uapi/linux/netlink.h" 1 +# 37 "./include/uapi/linux/netlink.h" +struct sockaddr_nl { + __kernel_sa_family_t nl_family; + unsigned short nl_pad; + __u32 nl_pid; + __u32 nl_groups; +}; + +struct nlmsghdr { + __u32 nlmsg_len; + __u16 nlmsg_type; + __u16 nlmsg_flags; + __u32 nlmsg_seq; + __u32 nlmsg_pid; +}; +# 109 "./include/uapi/linux/netlink.h" +struct nlmsgerr { + int error; + struct nlmsghdr msg; +# 121 "./include/uapi/linux/netlink.h" +}; +# 135 "./include/uapi/linux/netlink.h" +enum nlmsgerr_attrs { + NLMSGERR_ATTR_UNUSED, + NLMSGERR_ATTR_MSG, + NLMSGERR_ATTR_OFFS, + NLMSGERR_ATTR_COOKIE, + + __NLMSGERR_ATTR_MAX, + NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1 +}; +# 160 "./include/uapi/linux/netlink.h" +struct nl_pktinfo { + __u32 group; +}; + +struct nl_mmap_req { + unsigned int nm_block_size; + unsigned int nm_block_nr; + unsigned int nm_frame_size; + unsigned int nm_frame_nr; +}; + +struct nl_mmap_hdr { + unsigned int nm_status; + unsigned int nm_len; + __u32 nm_group; + + __u32 nm_pid; + __u32 nm_uid; + __u32 nm_gid; +}; +# 197 "./include/uapi/linux/netlink.h" +enum { + NETLINK_UNCONNECTED = 0, + NETLINK_CONNECTED, +}; +# 211 "./include/uapi/linux/netlink.h" +struct nlattr { + __u16 nla_len; + __u16 nla_type; +}; +# 247 "./include/uapi/linux/netlink.h" +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; +# 284 "./include/uapi/linux/netlink.h" +enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID, + + NL_ATTR_TYPE_FLAG, + + NL_ATTR_TYPE_U8, + NL_ATTR_TYPE_U16, + NL_ATTR_TYPE_U32, + NL_ATTR_TYPE_U64, + + NL_ATTR_TYPE_S8, + NL_ATTR_TYPE_S16, + NL_ATTR_TYPE_S32, + NL_ATTR_TYPE_S64, + + NL_ATTR_TYPE_BINARY, + NL_ATTR_TYPE_STRING, + NL_ATTR_TYPE_NUL_STRING, + + NL_ATTR_TYPE_NESTED, + NL_ATTR_TYPE_NESTED_ARRAY, + + NL_ATTR_TYPE_BITFIELD32, +}; +# 336 "./include/uapi/linux/netlink.h" +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC, + NL_POLICY_TYPE_ATTR_TYPE, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S, + NL_POLICY_TYPE_ATTR_MAX_VALUE_S, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U, + NL_POLICY_TYPE_ATTR_MIN_LENGTH, + NL_POLICY_TYPE_ATTR_MAX_LENGTH, + NL_POLICY_TYPE_ATTR_POLICY_IDX, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE, + 
NL_POLICY_TYPE_ATTR_BITFIELD32_MASK, + NL_POLICY_TYPE_ATTR_PAD, + + + __NL_POLICY_TYPE_ATTR_MAX, + NL_POLICY_TYPE_ATTR_MAX = __NL_POLICY_TYPE_ATTR_MAX - 1 +}; +# 11 "./include/linux/netlink.h" 2 + +struct net; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) +{ + return (struct nlmsghdr *)skb->data; +} + +enum netlink_skb_flags { + NETLINK_SKB_DST = 0x8, +}; + +struct netlink_skb_parms { + struct scm_creds creds; + __u32 portid; + __u32 dst_group; + __u32 flags; + struct sock *sk; + bool nsid_is_set; + int nsid; +}; + + + + + +void netlink_table_grab(void); +void netlink_table_ungrab(void); + + + + + +struct netlink_kernel_cfg { + unsigned int groups; + unsigned int flags; + void (*input)(struct sk_buff *skb); + struct mutex *cb_mutex; + int (*bind)(struct net *net, int group); + void (*unbind)(struct net *net, int group); + bool (*compare)(struct net *net, struct sock *sk); +}; + +struct sock *__netlink_kernel_create(struct net *net, int unit, + struct module *module, + struct netlink_kernel_cfg *cfg); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock * +netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg) +{ + return __netlink_kernel_create(net, unit, ((struct module *)0), cfg); +} +# 74 "./include/linux/netlink.h" +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[20]; + u8 cookie_len; +}; +# 113 "./include/linux/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, + u64 cookie) +{ + u64 __cookie = cookie; + + if (!extack) + return; + memcpy(extack->cookie, &__cookie, sizeof(__cookie)); + extack->cookie_len = sizeof(__cookie); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack, + u32 cookie) +{ + u32 __cookie = cookie; + + if (!extack) + return; + memcpy(extack->cookie, &__cookie, sizeof(__cookie)); + extack->cookie_len = sizeof(__cookie); +} + +void netlink_kernel_release(struct sock *sk); +int __netlink_change_ngroups(struct sock *sk, unsigned int groups); +int netlink_change_ngroups(struct sock *sk, unsigned int groups); +void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); +void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, + const struct netlink_ext_ack *extack); +int netlink_has_listeners(struct sock *sk, unsigned int group); +bool netlink_strict_get_check(struct sk_buff *skb); + +int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); +int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, + __u32 group, gfp_t allocation); +int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, + __u32 portid, __u32 group, gfp_t allocation, + int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), + void *filter_data); +int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); +int netlink_register_notifier(struct notifier_block *nb); +int netlink_unregister_notifier(struct notifier_block *nb); + + +struct sock *netlink_getsockbyfilp(struct file *filp); +int netlink_attachskb(struct sock *sk, struct sk_buff *skb, + long *timeo, struct sock 
*ssk); +void netlink_detachskb(struct sock *sk, struct sk_buff *skb); +int netlink_sendskb(struct sock *sk, struct sk_buff *skb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff * +netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) +{ + struct sk_buff *nskb; + + nskb = skb_clone(skb, gfp_mask); + if (!nskb) + return ((void *)0); + + + if (is_vmalloc_addr(skb->head)) + nskb->destructor = skb->destructor; + + return nskb; +} +# 193 "./include/linux/netlink.h" +struct netlink_callback { + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff * skb, + struct netlink_callback *cb); + int (*done)(struct netlink_callback *cb); + void *data; + + struct module *module; + struct netlink_ext_ack *extack; + u16 family; + u16 answer_flags; + u32 min_dump_alloc; + unsigned int prev_seq, seq; + bool strict_check; + union { + u8 ctx[48]; + + + + + long args[6]; + }; +}; + +struct netlink_notify { + struct net *net; + u32 portid; + int protocol; +}; + +struct nlmsghdr * +__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags); + +struct netlink_dump_control { + int (*start)(struct netlink_callback *); + int (*dump)(struct sk_buff *skb, struct netlink_callback *); + int (*done)(struct netlink_callback *); + void *data; + struct module *module; + u16 min_dump_alloc; +}; + +int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control) +{ + if (!control->module) + control->module = ((struct module *)0); + + return __netlink_dump_start(ssk, skb, nlh, control); +} + +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +int netlink_add_tap(struct netlink_tap *nt); +int netlink_remove_tap(struct netlink_tap *nt); + +bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, + struct user_namespace *ns, int cap); +bool netlink_ns_capable(const struct sk_buff *skb, + struct user_namespace *ns, int cap); +bool netlink_capable(const struct sk_buff *skb, int cap); +bool netlink_net_capable(const struct sk_buff *skb, int cap); +# 7 "./include/uapi/linux/neighbour.h" 2 + +struct ndmsg { + __u8 ndm_family; + __u8 ndm_pad1; + __u16 ndm_pad2; + __s32 ndm_ifindex; + __u16 ndm_state; + __u8 ndm_flags; + __u8 ndm_type; +}; + +enum { + NDA_UNSPEC, + NDA_DST, + NDA_LLADDR, + NDA_CACHEINFO, + NDA_PROBES, + NDA_VLAN, + NDA_PORT, + NDA_VNI, + NDA_IFINDEX, + NDA_MASTER, + NDA_LINK_NETNSID, + NDA_SRC_VNI, + NDA_PROTOCOL, + NDA_NH_ID, + __NDA_MAX +}; +# 72 "./include/uapi/linux/neighbour.h" +struct nda_cacheinfo { + __u32 ndm_confirmed; + __u32 ndm_used; + __u32 ndm_updated; + __u32 ndm_refcnt; +}; +# 104 "./include/uapi/linux/neighbour.h" +struct ndt_stats { + __u64 ndts_allocs; + __u64 ndts_destroys; + __u64 ndts_hash_grows; + __u64 ndts_res_failed; + __u64 ndts_lookups; + __u64 ndts_hits; + __u64 ndts_rcv_probes_mcast; + __u64 ndts_rcv_probes_ucast; + __u64 ndts_periodic_gc_runs; + __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; +}; + +enum { + NDTPA_UNSPEC, + NDTPA_IFINDEX, + NDTPA_REFCNT, + NDTPA_REACHABLE_TIME, + NDTPA_BASE_REACHABLE_TIME, + NDTPA_RETRANS_TIME, + NDTPA_GC_STALETIME, + NDTPA_DELAY_PROBE_TIME, + 
NDTPA_QUEUE_LEN, + NDTPA_APP_PROBES, + NDTPA_UCAST_PROBES, + NDTPA_MCAST_PROBES, + NDTPA_ANYCAST_DELAY, + NDTPA_PROXY_DELAY, + NDTPA_PROXY_QLEN, + NDTPA_LOCKTIME, + NDTPA_QUEUE_LENBYTES, + NDTPA_MCAST_REPROBES, + NDTPA_PAD, + __NDTPA_MAX +}; + + +struct ndtmsg { + __u8 ndtm_family; + __u8 ndtm_pad1; + __u16 ndtm_pad2; +}; + +struct ndt_config { + __u16 ndtc_key_len; + __u16 ndtc_entry_size; + __u32 ndtc_entries; + __u32 ndtc_last_flush; + __u32 ndtc_last_rand; + __u32 ndtc_hash_rnd; + __u32 ndtc_hash_mask; + __u32 ndtc_hash_chain_gc; + __u32 ndtc_proxy_qlen; +}; + +enum { + NDTA_UNSPEC, + NDTA_NAME, + NDTA_THRESH1, + NDTA_THRESH2, + NDTA_THRESH3, + NDTA_CONFIG, + NDTA_PARMS, + NDTA_STATS, + NDTA_GC_INTERVAL, + NDTA_PAD, + __NDTA_MAX +}; +# 47 "./include/linux/netdevice.h" 2 +# 1 "./include/uapi/linux/netdevice.h" 1 +# 32 "./include/uapi/linux/netdevice.h" +# 1 "./include/linux/if_link.h" 1 + + + + +# 1 "./include/uapi/linux/if_link.h" 1 +# 9 "./include/uapi/linux/if_link.h" +struct rtnl_link_stats { + __u32 rx_packets; + __u32 tx_packets; + __u32 rx_bytes; + __u32 tx_bytes; + __u32 rx_errors; + __u32 tx_errors; + __u32 rx_dropped; + __u32 tx_dropped; + __u32 multicast; + __u32 collisions; + + + __u32 rx_length_errors; + __u32 rx_over_errors; + __u32 rx_crc_errors; + __u32 rx_frame_errors; + __u32 rx_fifo_errors; + __u32 rx_missed_errors; + + + __u32 tx_aborted_errors; + __u32 tx_carrier_errors; + __u32 tx_fifo_errors; + __u32 tx_heartbeat_errors; + __u32 tx_window_errors; + + + __u32 rx_compressed; + __u32 tx_compressed; + + __u32 rx_nohandler; +}; + + +struct rtnl_link_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; + __u64 collisions; + + + __u64 rx_length_errors; + __u64 rx_over_errors; + __u64 rx_crc_errors; + __u64 rx_frame_errors; + __u64 rx_fifo_errors; + __u64 rx_missed_errors; + + + __u64 tx_aborted_errors; + __u64 tx_carrier_errors; + __u64 tx_fifo_errors; + __u64 tx_heartbeat_errors; + __u64 tx_window_errors; + + + __u64 rx_compressed; + __u64 tx_compressed; + + __u64 rx_nohandler; +}; + + +struct rtnl_link_ifmap { + __u64 mem_start; + __u64 mem_end; + __u64 base_addr; + __u16 irq; + __u8 dma; + __u8 port; +}; +# 106 "./include/uapi/linux/if_link.h" +enum { + IFLA_UNSPEC, + IFLA_ADDRESS, + IFLA_BROADCAST, + IFLA_IFNAME, + IFLA_MTU, + IFLA_LINK, + IFLA_QDISC, + IFLA_STATS, + IFLA_COST, + + IFLA_PRIORITY, + + IFLA_MASTER, + + IFLA_WIRELESS, + + IFLA_PROTINFO, + + IFLA_TXQLEN, + + IFLA_MAP, + + IFLA_WEIGHT, + + IFLA_OPERSTATE, + IFLA_LINKMODE, + IFLA_LINKINFO, + + IFLA_NET_NS_PID, + IFLA_IFALIAS, + IFLA_NUM_VF, + IFLA_VFINFO_LIST, + IFLA_STATS64, + IFLA_VF_PORTS, + IFLA_PORT_SELF, + IFLA_AF_SPEC, + IFLA_GROUP, + IFLA_NET_NS_FD, + IFLA_EXT_MASK, + IFLA_PROMISCUITY, + + IFLA_NUM_TX_QUEUES, + IFLA_NUM_RX_QUEUES, + IFLA_CARRIER, + IFLA_PHYS_PORT_ID, + IFLA_CARRIER_CHANGES, + IFLA_PHYS_SWITCH_ID, + IFLA_LINK_NETNSID, + IFLA_PHYS_PORT_NAME, + IFLA_PROTO_DOWN, + IFLA_GSO_MAX_SEGS, + IFLA_GSO_MAX_SIZE, + IFLA_PAD, + IFLA_XDP, + IFLA_EVENT, + IFLA_NEW_NETNSID, + IFLA_IF_NETNSID, + IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, + IFLA_CARRIER_UP_COUNT, + IFLA_CARRIER_DOWN_COUNT, + IFLA_NEW_IFINDEX, + IFLA_MIN_MTU, + IFLA_MAX_MTU, + IFLA_PROP_LIST, + IFLA_ALT_IFNAME, + IFLA_PERM_ADDRESS, + __IFLA_MAX +}; +# 185 "./include/uapi/linux/if_link.h" +enum { + IFLA_INET_UNSPEC, + IFLA_INET_CONF, + __IFLA_INET_MAX, +}; +# 223 "./include/uapi/linux/if_link.h" +enum { + 
IFLA_INET6_UNSPEC, + IFLA_INET6_FLAGS, + IFLA_INET6_CONF, + IFLA_INET6_STATS, + IFLA_INET6_MCAST, + IFLA_INET6_CACHEINFO, + IFLA_INET6_ICMP6STATS, + IFLA_INET6_TOKEN, + IFLA_INET6_ADDR_GEN_MODE, + __IFLA_INET6_MAX +}; + + + +enum in6_addr_gen_mode { + IN6_ADDR_GEN_MODE_EUI64, + IN6_ADDR_GEN_MODE_NONE, + IN6_ADDR_GEN_MODE_STABLE_PRIVACY, + IN6_ADDR_GEN_MODE_RANDOM, +}; + + + +enum { + IFLA_BR_UNSPEC, + IFLA_BR_FORWARD_DELAY, + IFLA_BR_HELLO_TIME, + IFLA_BR_MAX_AGE, + IFLA_BR_AGEING_TIME, + IFLA_BR_STP_STATE, + IFLA_BR_PRIORITY, + IFLA_BR_VLAN_FILTERING, + IFLA_BR_VLAN_PROTOCOL, + IFLA_BR_GROUP_FWD_MASK, + IFLA_BR_ROOT_ID, + IFLA_BR_BRIDGE_ID, + IFLA_BR_ROOT_PORT, + IFLA_BR_ROOT_PATH_COST, + IFLA_BR_TOPOLOGY_CHANGE, + IFLA_BR_TOPOLOGY_CHANGE_DETECTED, + IFLA_BR_HELLO_TIMER, + IFLA_BR_TCN_TIMER, + IFLA_BR_TOPOLOGY_CHANGE_TIMER, + IFLA_BR_GC_TIMER, + IFLA_BR_GROUP_ADDR, + IFLA_BR_FDB_FLUSH, + IFLA_BR_MCAST_ROUTER, + IFLA_BR_MCAST_SNOOPING, + IFLA_BR_MCAST_QUERY_USE_IFADDR, + IFLA_BR_MCAST_QUERIER, + IFLA_BR_MCAST_HASH_ELASTICITY, + IFLA_BR_MCAST_HASH_MAX, + IFLA_BR_MCAST_LAST_MEMBER_CNT, + IFLA_BR_MCAST_STARTUP_QUERY_CNT, + IFLA_BR_MCAST_LAST_MEMBER_INTVL, + IFLA_BR_MCAST_MEMBERSHIP_INTVL, + IFLA_BR_MCAST_QUERIER_INTVL, + IFLA_BR_MCAST_QUERY_INTVL, + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, + IFLA_BR_MCAST_STARTUP_QUERY_INTVL, + IFLA_BR_NF_CALL_IPTABLES, + IFLA_BR_NF_CALL_IP6TABLES, + IFLA_BR_NF_CALL_ARPTABLES, + IFLA_BR_VLAN_DEFAULT_PVID, + IFLA_BR_PAD, + IFLA_BR_VLAN_STATS_ENABLED, + IFLA_BR_MCAST_STATS_ENABLED, + IFLA_BR_MCAST_IGMP_VERSION, + IFLA_BR_MCAST_MLD_VERSION, + IFLA_BR_VLAN_STATS_PER_PORT, + IFLA_BR_MULTI_BOOLOPT, + __IFLA_BR_MAX, +}; + + + +struct ifla_bridge_id { + __u8 prio[2]; + __u8 addr[6]; +}; + +enum { + BRIDGE_MODE_UNSPEC, + BRIDGE_MODE_HAIRPIN, +}; + +enum { + IFLA_BRPORT_UNSPEC, + IFLA_BRPORT_STATE, + IFLA_BRPORT_PRIORITY, + IFLA_BRPORT_COST, + IFLA_BRPORT_MODE, + IFLA_BRPORT_GUARD, + IFLA_BRPORT_PROTECT, + IFLA_BRPORT_FAST_LEAVE, + IFLA_BRPORT_LEARNING, + IFLA_BRPORT_UNICAST_FLOOD, + IFLA_BRPORT_PROXYARP, + IFLA_BRPORT_LEARNING_SYNC, + IFLA_BRPORT_PROXYARP_WIFI, + IFLA_BRPORT_ROOT_ID, + IFLA_BRPORT_BRIDGE_ID, + IFLA_BRPORT_DESIGNATED_PORT, + IFLA_BRPORT_DESIGNATED_COST, + IFLA_BRPORT_ID, + IFLA_BRPORT_NO, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, + IFLA_BRPORT_CONFIG_PENDING, + IFLA_BRPORT_MESSAGE_AGE_TIMER, + IFLA_BRPORT_FORWARD_DELAY_TIMER, + IFLA_BRPORT_HOLD_TIMER, + IFLA_BRPORT_FLUSH, + IFLA_BRPORT_MULTICAST_ROUTER, + IFLA_BRPORT_PAD, + IFLA_BRPORT_MCAST_FLOOD, + IFLA_BRPORT_MCAST_TO_UCAST, + IFLA_BRPORT_VLAN_TUNNEL, + IFLA_BRPORT_BCAST_FLOOD, + IFLA_BRPORT_GROUP_FWD_MASK, + IFLA_BRPORT_NEIGH_SUPPRESS, + IFLA_BRPORT_ISOLATED, + IFLA_BRPORT_BACKUP_PORT, + IFLA_BRPORT_MRP_RING_OPEN, + __IFLA_BRPORT_MAX +}; + + +struct ifla_cacheinfo { + __u32 max_reasm_len; + __u32 tstamp; + __u32 reachable_time; + __u32 retrans_time; +}; + +enum { + IFLA_INFO_UNSPEC, + IFLA_INFO_KIND, + IFLA_INFO_DATA, + IFLA_INFO_XSTATS, + IFLA_INFO_SLAVE_KIND, + IFLA_INFO_SLAVE_DATA, + __IFLA_INFO_MAX, +}; + + + + + +enum { + IFLA_VLAN_UNSPEC, + IFLA_VLAN_ID, + IFLA_VLAN_FLAGS, + IFLA_VLAN_EGRESS_QOS, + IFLA_VLAN_INGRESS_QOS, + IFLA_VLAN_PROTOCOL, + __IFLA_VLAN_MAX, +}; + + + +struct ifla_vlan_flags { + __u32 flags; + __u32 mask; +}; + +enum { + IFLA_VLAN_QOS_UNSPEC, + IFLA_VLAN_QOS_MAPPING, + __IFLA_VLAN_QOS_MAX +}; + + + +struct ifla_vlan_qos_mapping { + __u32 from; + __u32 to; +}; + + +enum { + IFLA_MACVLAN_UNSPEC, + IFLA_MACVLAN_MODE, + IFLA_MACVLAN_FLAGS, + IFLA_MACVLAN_MACADDR_MODE, + 
IFLA_MACVLAN_MACADDR, + IFLA_MACVLAN_MACADDR_DATA, + IFLA_MACVLAN_MACADDR_COUNT, + __IFLA_MACVLAN_MAX, +}; + + + +enum macvlan_mode { + MACVLAN_MODE_PRIVATE = 1, + MACVLAN_MODE_VEPA = 2, + MACVLAN_MODE_BRIDGE = 4, + MACVLAN_MODE_PASSTHRU = 8, + MACVLAN_MODE_SOURCE = 16, +}; + +enum macvlan_macaddr_mode { + MACVLAN_MACADDR_ADD, + MACVLAN_MACADDR_DEL, + MACVLAN_MACADDR_FLUSH, + MACVLAN_MACADDR_SET, +}; + + + + +enum { + IFLA_VRF_UNSPEC, + IFLA_VRF_TABLE, + __IFLA_VRF_MAX +}; + + + +enum { + IFLA_VRF_PORT_UNSPEC, + IFLA_VRF_PORT_TABLE, + __IFLA_VRF_PORT_MAX +}; + + + + +enum { + IFLA_MACSEC_UNSPEC, + IFLA_MACSEC_SCI, + IFLA_MACSEC_PORT, + IFLA_MACSEC_ICV_LEN, + IFLA_MACSEC_CIPHER_SUITE, + IFLA_MACSEC_WINDOW, + IFLA_MACSEC_ENCODING_SA, + IFLA_MACSEC_ENCRYPT, + IFLA_MACSEC_PROTECT, + IFLA_MACSEC_INC_SCI, + IFLA_MACSEC_ES, + IFLA_MACSEC_SCB, + IFLA_MACSEC_REPLAY_PROTECT, + IFLA_MACSEC_VALIDATION, + IFLA_MACSEC_PAD, + IFLA_MACSEC_OFFLOAD, + __IFLA_MACSEC_MAX, +}; + + + + +enum { + IFLA_XFRM_UNSPEC, + IFLA_XFRM_LINK, + IFLA_XFRM_IF_ID, + __IFLA_XFRM_MAX +}; + + + +enum macsec_validation_type { + MACSEC_VALIDATE_DISABLED = 0, + MACSEC_VALIDATE_CHECK = 1, + MACSEC_VALIDATE_STRICT = 2, + __MACSEC_VALIDATE_END, + MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1, +}; + +enum macsec_offload { + MACSEC_OFFLOAD_OFF = 0, + MACSEC_OFFLOAD_PHY = 1, + MACSEC_OFFLOAD_MAC = 2, + __MACSEC_OFFLOAD_END, + MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1, +}; + + +enum { + IFLA_IPVLAN_UNSPEC, + IFLA_IPVLAN_MODE, + IFLA_IPVLAN_FLAGS, + __IFLA_IPVLAN_MAX +}; + + + +enum ipvlan_mode { + IPVLAN_MODE_L2 = 0, + IPVLAN_MODE_L3, + IPVLAN_MODE_L3S, + IPVLAN_MODE_MAX +}; + + + + + +enum { + IFLA_VXLAN_UNSPEC, + IFLA_VXLAN_ID, + IFLA_VXLAN_GROUP, + IFLA_VXLAN_LINK, + IFLA_VXLAN_LOCAL, + IFLA_VXLAN_TTL, + IFLA_VXLAN_TOS, + IFLA_VXLAN_LEARNING, + IFLA_VXLAN_AGEING, + IFLA_VXLAN_LIMIT, + IFLA_VXLAN_PORT_RANGE, + IFLA_VXLAN_PROXY, + IFLA_VXLAN_RSC, + IFLA_VXLAN_L2MISS, + IFLA_VXLAN_L3MISS, + IFLA_VXLAN_PORT, + IFLA_VXLAN_GROUP6, + IFLA_VXLAN_LOCAL6, + IFLA_VXLAN_UDP_CSUM, + IFLA_VXLAN_UDP_ZERO_CSUM6_TX, + IFLA_VXLAN_UDP_ZERO_CSUM6_RX, + IFLA_VXLAN_REMCSUM_TX, + IFLA_VXLAN_REMCSUM_RX, + IFLA_VXLAN_GBP, + IFLA_VXLAN_REMCSUM_NOPARTIAL, + IFLA_VXLAN_COLLECT_METADATA, + IFLA_VXLAN_LABEL, + IFLA_VXLAN_GPE, + IFLA_VXLAN_TTL_INHERIT, + IFLA_VXLAN_DF, + __IFLA_VXLAN_MAX +}; + + +struct ifla_vxlan_port_range { + __be16 low; + __be16 high; +}; + +enum ifla_vxlan_df { + VXLAN_DF_UNSET = 0, + VXLAN_DF_SET, + VXLAN_DF_INHERIT, + __VXLAN_DF_END, + VXLAN_DF_MAX = __VXLAN_DF_END - 1, +}; + + +enum { + IFLA_GENEVE_UNSPEC, + IFLA_GENEVE_ID, + IFLA_GENEVE_REMOTE, + IFLA_GENEVE_TTL, + IFLA_GENEVE_TOS, + IFLA_GENEVE_PORT, + IFLA_GENEVE_COLLECT_METADATA, + IFLA_GENEVE_REMOTE6, + IFLA_GENEVE_UDP_CSUM, + IFLA_GENEVE_UDP_ZERO_CSUM6_TX, + IFLA_GENEVE_UDP_ZERO_CSUM6_RX, + IFLA_GENEVE_LABEL, + IFLA_GENEVE_TTL_INHERIT, + IFLA_GENEVE_DF, + __IFLA_GENEVE_MAX +}; + + +enum ifla_geneve_df { + GENEVE_DF_UNSET = 0, + GENEVE_DF_SET, + GENEVE_DF_INHERIT, + __GENEVE_DF_END, + GENEVE_DF_MAX = __GENEVE_DF_END - 1, +}; + + +enum { + IFLA_BAREUDP_UNSPEC, + IFLA_BAREUDP_PORT, + IFLA_BAREUDP_ETHERTYPE, + IFLA_BAREUDP_SRCPORT_MIN, + IFLA_BAREUDP_MULTIPROTO_MODE, + __IFLA_BAREUDP_MAX +}; + + + + +enum { + IFLA_PPP_UNSPEC, + IFLA_PPP_DEV_FD, + __IFLA_PPP_MAX +}; + + + + +enum ifla_gtp_role { + GTP_ROLE_GGSN = 0, + GTP_ROLE_SGSN, +}; + +enum { + IFLA_GTP_UNSPEC, + IFLA_GTP_FD0, + IFLA_GTP_FD1, + IFLA_GTP_PDP_HASHSIZE, + IFLA_GTP_ROLE, + __IFLA_GTP_MAX, +}; + + + + +enum { + 
IFLA_BOND_UNSPEC, + IFLA_BOND_MODE, + IFLA_BOND_ACTIVE_SLAVE, + IFLA_BOND_MIIMON, + IFLA_BOND_UPDELAY, + IFLA_BOND_DOWNDELAY, + IFLA_BOND_USE_CARRIER, + IFLA_BOND_ARP_INTERVAL, + IFLA_BOND_ARP_IP_TARGET, + IFLA_BOND_ARP_VALIDATE, + IFLA_BOND_ARP_ALL_TARGETS, + IFLA_BOND_PRIMARY, + IFLA_BOND_PRIMARY_RESELECT, + IFLA_BOND_FAIL_OVER_MAC, + IFLA_BOND_XMIT_HASH_POLICY, + IFLA_BOND_RESEND_IGMP, + IFLA_BOND_NUM_PEER_NOTIF, + IFLA_BOND_ALL_SLAVES_ACTIVE, + IFLA_BOND_MIN_LINKS, + IFLA_BOND_LP_INTERVAL, + IFLA_BOND_PACKETS_PER_SLAVE, + IFLA_BOND_AD_LACP_RATE, + IFLA_BOND_AD_SELECT, + IFLA_BOND_AD_INFO, + IFLA_BOND_AD_ACTOR_SYS_PRIO, + IFLA_BOND_AD_USER_PORT_KEY, + IFLA_BOND_AD_ACTOR_SYSTEM, + IFLA_BOND_TLB_DYNAMIC_LB, + IFLA_BOND_PEER_NOTIF_DELAY, + __IFLA_BOND_MAX, +}; + + + +enum { + IFLA_BOND_AD_INFO_UNSPEC, + IFLA_BOND_AD_INFO_AGGREGATOR, + IFLA_BOND_AD_INFO_NUM_PORTS, + IFLA_BOND_AD_INFO_ACTOR_KEY, + IFLA_BOND_AD_INFO_PARTNER_KEY, + IFLA_BOND_AD_INFO_PARTNER_MAC, + __IFLA_BOND_AD_INFO_MAX, +}; + + + +enum { + IFLA_BOND_SLAVE_UNSPEC, + IFLA_BOND_SLAVE_STATE, + IFLA_BOND_SLAVE_MII_STATUS, + IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, + IFLA_BOND_SLAVE_PERM_HWADDR, + IFLA_BOND_SLAVE_QUEUE_ID, + IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, + IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, + IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, + __IFLA_BOND_SLAVE_MAX, +}; + + + + + +enum { + IFLA_VF_INFO_UNSPEC, + IFLA_VF_INFO, + __IFLA_VF_INFO_MAX, +}; + + + +enum { + IFLA_VF_UNSPEC, + IFLA_VF_MAC, + IFLA_VF_VLAN, + IFLA_VF_TX_RATE, + IFLA_VF_SPOOFCHK, + IFLA_VF_LINK_STATE, + IFLA_VF_RATE, + IFLA_VF_RSS_QUERY_EN, + + + IFLA_VF_STATS, + IFLA_VF_TRUST, + IFLA_VF_IB_NODE_GUID, + IFLA_VF_IB_PORT_GUID, + IFLA_VF_VLAN_LIST, + IFLA_VF_BROADCAST, + __IFLA_VF_MAX, +}; + + + +struct ifla_vf_mac { + __u32 vf; + __u8 mac[32]; +}; + +struct ifla_vf_broadcast { + __u8 broadcast[32]; +}; + +struct ifla_vf_vlan { + __u32 vf; + __u32 vlan; + __u32 qos; +}; + +enum { + IFLA_VF_VLAN_INFO_UNSPEC, + IFLA_VF_VLAN_INFO, + __IFLA_VF_VLAN_INFO_MAX, +}; + + + + +struct ifla_vf_vlan_info { + __u32 vf; + __u32 vlan; + __u32 qos; + __be16 vlan_proto; +}; + +struct ifla_vf_tx_rate { + __u32 vf; + __u32 rate; +}; + +struct ifla_vf_rate { + __u32 vf; + __u32 min_tx_rate; + __u32 max_tx_rate; +}; + +struct ifla_vf_spoofchk { + __u32 vf; + __u32 setting; +}; + +struct ifla_vf_guid { + __u32 vf; + __u64 guid; +}; + +enum { + IFLA_VF_LINK_STATE_AUTO, + IFLA_VF_LINK_STATE_ENABLE, + IFLA_VF_LINK_STATE_DISABLE, + __IFLA_VF_LINK_STATE_MAX, +}; + +struct ifla_vf_link_state { + __u32 vf; + __u32 link_state; +}; + +struct ifla_vf_rss_query_en { + __u32 vf; + __u32 setting; +}; + +enum { + IFLA_VF_STATS_RX_PACKETS, + IFLA_VF_STATS_TX_PACKETS, + IFLA_VF_STATS_RX_BYTES, + IFLA_VF_STATS_TX_BYTES, + IFLA_VF_STATS_BROADCAST, + IFLA_VF_STATS_MULTICAST, + IFLA_VF_STATS_PAD, + IFLA_VF_STATS_RX_DROPPED, + IFLA_VF_STATS_TX_DROPPED, + __IFLA_VF_STATS_MAX, +}; + + + +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; +# 833 "./include/uapi/linux/if_link.h" +enum { + IFLA_VF_PORT_UNSPEC, + IFLA_VF_PORT, + __IFLA_VF_PORT_MAX, +}; + + + +enum { + IFLA_PORT_UNSPEC, + IFLA_PORT_VF, + IFLA_PORT_PROFILE, + IFLA_PORT_VSI_TYPE, + IFLA_PORT_INSTANCE_UUID, + IFLA_PORT_HOST_UUID, + IFLA_PORT_REQUEST, + IFLA_PORT_RESPONSE, + __IFLA_PORT_MAX, +}; + + + + + + + +enum { + PORT_REQUEST_PREASSOCIATE = 0, + PORT_REQUEST_PREASSOCIATE_RR, + PORT_REQUEST_ASSOCIATE, + PORT_REQUEST_DISASSOCIATE, +}; + +enum { + PORT_VDP_RESPONSE_SUCCESS = 0, + PORT_VDP_RESPONSE_INVALID_FORMAT, + 
PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES, + PORT_VDP_RESPONSE_UNUSED_VTID, + PORT_VDP_RESPONSE_VTID_VIOLATION, + PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION, + PORT_VDP_RESPONSE_OUT_OF_SYNC, + + PORT_PROFILE_RESPONSE_SUCCESS = 0x100, + PORT_PROFILE_RESPONSE_INPROGRESS, + PORT_PROFILE_RESPONSE_INVALID, + PORT_PROFILE_RESPONSE_BADSTATE, + PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES, + PORT_PROFILE_RESPONSE_ERROR, +}; + +struct ifla_port_vsi { + __u8 vsi_mgr_id; + __u8 vsi_type_id[3]; + __u8 vsi_type_version; + __u8 pad[3]; +}; + + + + +enum { + IFLA_IPOIB_UNSPEC, + IFLA_IPOIB_PKEY, + IFLA_IPOIB_MODE, + IFLA_IPOIB_UMCAST, + __IFLA_IPOIB_MAX +}; + +enum { + IPOIB_MODE_DATAGRAM = 0, + IPOIB_MODE_CONNECTED = 1, +}; + + + + + + +enum { + IFLA_HSR_UNSPEC, + IFLA_HSR_SLAVE1, + IFLA_HSR_SLAVE2, + IFLA_HSR_MULTICAST_SPEC, + IFLA_HSR_SUPERVISION_ADDR, + IFLA_HSR_SEQ_NR, + IFLA_HSR_VERSION, + __IFLA_HSR_MAX, +}; + + + + + +struct if_stats_msg { + __u8 family; + __u8 pad1; + __u16 pad2; + __u32 ifindex; + __u32 filter_mask; +}; + + + + +enum { + IFLA_STATS_UNSPEC, + IFLA_STATS_LINK_64, + IFLA_STATS_LINK_XSTATS, + IFLA_STATS_LINK_XSTATS_SLAVE, + IFLA_STATS_LINK_OFFLOAD_XSTATS, + IFLA_STATS_AF_SPEC, + __IFLA_STATS_MAX, +}; +# 956 "./include/uapi/linux/if_link.h" +enum { + LINK_XSTATS_TYPE_UNSPEC, + LINK_XSTATS_TYPE_BRIDGE, + LINK_XSTATS_TYPE_BOND, + __LINK_XSTATS_TYPE_MAX +}; + + + +enum { + IFLA_OFFLOAD_XSTATS_UNSPEC, + IFLA_OFFLOAD_XSTATS_CPU_HIT, + __IFLA_OFFLOAD_XSTATS_MAX +}; +# 986 "./include/uapi/linux/if_link.h" +enum { + XDP_ATTACHED_NONE = 0, + XDP_ATTACHED_DRV, + XDP_ATTACHED_SKB, + XDP_ATTACHED_HW, + XDP_ATTACHED_MULTI, +}; + +enum { + IFLA_XDP_UNSPEC, + IFLA_XDP_FD, + IFLA_XDP_ATTACHED, + IFLA_XDP_FLAGS, + IFLA_XDP_PROG_ID, + IFLA_XDP_DRV_PROG_ID, + IFLA_XDP_SKB_PROG_ID, + IFLA_XDP_HW_PROG_ID, + IFLA_XDP_EXPECTED_FD, + __IFLA_XDP_MAX, +}; + + + +enum { + IFLA_EVENT_NONE, + IFLA_EVENT_REBOOT, + IFLA_EVENT_FEATURES, + IFLA_EVENT_BONDING_FAILOVER, + IFLA_EVENT_NOTIFY_PEERS, + IFLA_EVENT_IGMP_RESEND, + IFLA_EVENT_BONDING_OPTIONS, +}; + + + +enum { + IFLA_TUN_UNSPEC, + IFLA_TUN_OWNER, + IFLA_TUN_GROUP, + IFLA_TUN_TYPE, + IFLA_TUN_PI, + IFLA_TUN_VNET_HDR, + IFLA_TUN_PERSIST, + IFLA_TUN_MULTI_QUEUE, + IFLA_TUN_NUM_QUEUES, + IFLA_TUN_NUM_DISABLED_QUEUES, + __IFLA_TUN_MAX, +}; +# 1044 "./include/uapi/linux/if_link.h" +enum { + IFLA_RMNET_UNSPEC, + IFLA_RMNET_MUX_ID, + IFLA_RMNET_FLAGS, + __IFLA_RMNET_MAX, +}; + + + +struct ifla_rmnet_flags { + __u32 flags; + __u32 mask; +}; +# 6 "./include/linux/if_link.h" 2 + + + +struct ifla_vf_stats { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 broadcast; + __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; +}; + +struct ifla_vf_info { + __u32 vf; + __u8 mac[32]; + __u32 vlan; + __u32 qos; + __u32 spoofchk; + __u32 linkstate; + __u32 min_tx_rate; + __u32 max_tx_rate; + __u32 rss_query_en; + __u32 trusted; + __be16 vlan_proto; +}; +# 33 "./include/uapi/linux/netdevice.h" 2 +# 49 "./include/uapi/linux/netdevice.h" +enum { + IF_PORT_UNKNOWN = 0, + IF_PORT_10BASE2, + IF_PORT_10BASET, + IF_PORT_AUI, + IF_PORT_100BASET, + IF_PORT_100BASETX, + IF_PORT_100BASEFX +}; +# 48 "./include/linux/netdevice.h" 2 +# 1 "./include/uapi/linux/if_bonding.h" 1 +# 108 "./include/uapi/linux/if_bonding.h" +typedef struct ifbond { + __s32 bond_mode; + __s32 num_slaves; + __s32 miimon; +} ifbond; + +typedef struct ifslave { + __s32 slave_id; + char slave_name[16]; + __s8 link; + __s8 state; + __u32 link_failure_count; +} ifslave; + +struct ad_info { 
+ __u16 aggregator_id; + __u16 ports; + __u16 actor_key; + __u16 partner_key; + __u8 partner_system[6]; +}; + + +enum { + BOND_XSTATS_UNSPEC, + BOND_XSTATS_3AD, + __BOND_XSTATS_MAX +}; + + + +enum { + BOND_3AD_STAT_LACPDU_RX, + BOND_3AD_STAT_LACPDU_TX, + BOND_3AD_STAT_LACPDU_UNKNOWN_RX, + BOND_3AD_STAT_LACPDU_ILLEGAL_RX, + BOND_3AD_STAT_MARKER_RX, + BOND_3AD_STAT_MARKER_TX, + BOND_3AD_STAT_MARKER_RESP_RX, + BOND_3AD_STAT_MARKER_RESP_TX, + BOND_3AD_STAT_MARKER_UNKNOWN_RX, + BOND_3AD_STAT_PAD, + __BOND_3AD_STAT_MAX +}; +# 49 "./include/linux/netdevice.h" 2 +# 1 "./include/uapi/linux/pkt_cls.h" 1 + + + + + +# 1 "./include/uapi/linux/pkt_sched.h" 1 +# 34 "./include/uapi/linux/pkt_sched.h" +struct tc_stats { + __u64 bytes; + __u32 packets; + __u32 drops; + __u32 overlimits; + + __u32 bps; + __u32 pps; + __u32 qlen; + __u32 backlog; +}; + +struct tc_estimator { + signed char interval; + unsigned char ewma_log; +}; +# 84 "./include/uapi/linux/pkt_sched.h" +enum tc_link_layer { + TC_LINKLAYER_UNAWARE, + TC_LINKLAYER_ETHERNET, + TC_LINKLAYER_ATM, +}; + + +struct tc_ratespec { + unsigned char cell_log; + __u8 linklayer; + unsigned short overhead; + short cell_align; + unsigned short mpu; + __u32 rate; +}; + + + +struct tc_sizespec { + unsigned char cell_log; + unsigned char size_log; + short cell_align; + int overhead; + unsigned int linklayer; + unsigned int mpu; + unsigned int mtu; + unsigned int tsize; +}; + +enum { + TCA_STAB_UNSPEC, + TCA_STAB_BASE, + TCA_STAB_DATA, + __TCA_STAB_MAX +}; + + + + + +struct tc_fifo_qopt { + __u32 limit; +}; +# 139 "./include/uapi/linux/pkt_sched.h" +struct tc_skbprio_qopt { + __u32 limit; +}; + + + + + + +struct tc_prio_qopt { + int bands; + __u8 priomap[15 +1]; +}; + + + +struct tc_multiq_qopt { + __u16 bands; + __u16 max_bands; +}; +# 167 "./include/uapi/linux/pkt_sched.h" +struct tc_plug_qopt { +# 177 "./include/uapi/linux/pkt_sched.h" + int action; + __u32 limit; +}; + + + +struct tc_tbf_qopt { + struct tc_ratespec rate; + struct tc_ratespec peakrate; + __u32 limit; + __u32 buffer; + __u32 mtu; +}; + +enum { + TCA_TBF_UNSPEC, + TCA_TBF_PARMS, + TCA_TBF_RTAB, + TCA_TBF_PTAB, + TCA_TBF_RATE64, + TCA_TBF_PRATE64, + TCA_TBF_BURST, + TCA_TBF_PBURST, + TCA_TBF_PAD, + __TCA_TBF_MAX, +}; +# 213 "./include/uapi/linux/pkt_sched.h" +struct tc_sfq_qopt { + unsigned quantum; + int perturb_period; + __u32 limit; + unsigned divisor; + unsigned flows; +}; + +struct tc_sfqred_stats { + __u32 prob_drop; + __u32 forced_drop; + __u32 prob_mark; + __u32 forced_mark; + __u32 prob_mark_head; + __u32 forced_mark_head; +}; + +struct tc_sfq_qopt_v1 { + struct tc_sfq_qopt v0; + unsigned int depth; + unsigned int headdrop; + + __u32 limit; + __u32 qth_min; + __u32 qth_max; + unsigned char Wlog; + unsigned char Plog; + unsigned char Scell_log; + unsigned char flags; + __u32 max_P; + + struct tc_sfqred_stats stats; +}; + + +struct tc_sfq_xstats { + __s32 allot; +}; + + + +enum { + TCA_RED_UNSPEC, + TCA_RED_PARMS, + TCA_RED_STAB, + TCA_RED_MAX_P, + TCA_RED_FLAGS, + __TCA_RED_MAX, +}; + + + +struct tc_red_qopt { + __u32 limit; + __u32 qth_min; + __u32 qth_max; + unsigned char Wlog; + unsigned char Plog; + unsigned char Scell_log; +# 285 "./include/uapi/linux/pkt_sched.h" + unsigned char flags; + + + + +}; + + + +struct tc_red_xstats { + __u32 early; + __u32 pdrop; + __u32 other; + __u32 marked; +}; + + + + + +enum { + TCA_GRED_UNSPEC, + TCA_GRED_PARMS, + TCA_GRED_STAB, + TCA_GRED_DPS, + TCA_GRED_MAX_P, + TCA_GRED_LIMIT, + TCA_GRED_VQ_LIST, + __TCA_GRED_MAX, +}; + + + +enum { + 
TCA_GRED_VQ_ENTRY_UNSPEC, + TCA_GRED_VQ_ENTRY, + __TCA_GRED_VQ_ENTRY_MAX, +}; + + +enum { + TCA_GRED_VQ_UNSPEC, + TCA_GRED_VQ_PAD, + TCA_GRED_VQ_DP, + TCA_GRED_VQ_STAT_BYTES, + TCA_GRED_VQ_STAT_PACKETS, + TCA_GRED_VQ_STAT_BACKLOG, + TCA_GRED_VQ_STAT_PROB_DROP, + TCA_GRED_VQ_STAT_PROB_MARK, + TCA_GRED_VQ_STAT_FORCED_DROP, + TCA_GRED_VQ_STAT_FORCED_MARK, + TCA_GRED_VQ_STAT_PDROP, + TCA_GRED_VQ_STAT_OTHER, + TCA_GRED_VQ_FLAGS, + __TCA_GRED_VQ_MAX +}; + + + +struct tc_gred_qopt { + __u32 limit; + __u32 qth_min; + __u32 qth_max; + __u32 DP; + __u32 backlog; + __u32 qave; + __u32 forced; + __u32 early; + __u32 other; + __u32 pdrop; + __u8 Wlog; + __u8 Plog; + __u8 Scell_log; + __u8 prio; + __u32 packets; + __u32 bytesin; +}; + + +struct tc_gred_sopt { + __u32 DPs; + __u32 def_DP; + __u8 grio; + __u8 flags; + __u16 pad1; +}; + + + +enum { + TCA_CHOKE_UNSPEC, + TCA_CHOKE_PARMS, + TCA_CHOKE_STAB, + TCA_CHOKE_MAX_P, + __TCA_CHOKE_MAX, +}; + + + +struct tc_choke_qopt { + __u32 limit; + __u32 qth_min; + __u32 qth_max; + unsigned char Wlog; + unsigned char Plog; + unsigned char Scell_log; + unsigned char flags; +}; + +struct tc_choke_xstats { + __u32 early; + __u32 pdrop; + __u32 other; + __u32 marked; + __u32 matched; +}; + + + + + + +struct tc_htb_opt { + struct tc_ratespec rate; + struct tc_ratespec ceil; + __u32 buffer; + __u32 cbuffer; + __u32 quantum; + __u32 level; + __u32 prio; +}; +struct tc_htb_glob { + __u32 version; + __u32 rate2quantum; + __u32 defcls; + __u32 debug; + + + __u32 direct_pkts; +}; +enum { + TCA_HTB_UNSPEC, + TCA_HTB_PARMS, + TCA_HTB_INIT, + TCA_HTB_CTAB, + TCA_HTB_RTAB, + TCA_HTB_DIRECT_QLEN, + TCA_HTB_RATE64, + TCA_HTB_CEIL64, + TCA_HTB_PAD, + __TCA_HTB_MAX, +}; + + + +struct tc_htb_xstats { + __u32 lends; + __u32 borrows; + __u32 giants; + __s32 tokens; + __s32 ctokens; +}; + + + +struct tc_hfsc_qopt { + __u16 defcls; +}; + +struct tc_service_curve { + __u32 m1; + __u32 d; + __u32 m2; +}; + +struct tc_hfsc_stats { + __u64 work; + __u64 rtwork; + __u32 period; + __u32 level; +}; + +enum { + TCA_HFSC_UNSPEC, + TCA_HFSC_RSC, + TCA_HFSC_FSC, + TCA_HFSC_USC, + __TCA_HFSC_MAX, +}; +# 484 "./include/uapi/linux/pkt_sched.h" +struct tc_cbq_lssopt { + unsigned char change; + unsigned char flags; + + + unsigned char ewma_log; + unsigned char level; + + + + + + + __u32 maxidle; + __u32 minidle; + __u32 offtime; + __u32 avpkt; +}; + +struct tc_cbq_wrropt { + unsigned char flags; + unsigned char priority; + unsigned char cpriority; + unsigned char __reserved; + __u32 allot; + __u32 weight; +}; + +struct tc_cbq_ovl { + unsigned char strategy; + + + + + + unsigned char priority2; + __u16 pad; + __u32 penalty; +}; + +struct tc_cbq_police { + unsigned char police; + unsigned char __res1; + unsigned short __res2; +}; + +struct tc_cbq_fopt { + __u32 split; + __u32 defmap; + __u32 defchange; +}; + +struct tc_cbq_xstats { + __u32 borrows; + __u32 overactions; + __s32 avgidle; + __s32 undertime; +}; + +enum { + TCA_CBQ_UNSPEC, + TCA_CBQ_LSSOPT, + TCA_CBQ_WRROPT, + TCA_CBQ_FOPT, + TCA_CBQ_OVL_STRATEGY, + TCA_CBQ_RATE, + TCA_CBQ_RTAB, + TCA_CBQ_POLICE, + __TCA_CBQ_MAX, +}; + + + + + +enum { + TCA_DSMARK_UNSPEC, + TCA_DSMARK_INDICES, + TCA_DSMARK_DEFAULT_INDEX, + TCA_DSMARK_SET_TC_INDEX, + TCA_DSMARK_MASK, + TCA_DSMARK_VALUE, + __TCA_DSMARK_MAX, +}; + + + + + +enum { + TCA_ATM_UNSPEC, + TCA_ATM_FD, + TCA_ATM_PTR, + TCA_ATM_HDR, + TCA_ATM_EXCESS, + TCA_ATM_ADDR, + TCA_ATM_STATE, + __TCA_ATM_MAX, +}; + + + + + +enum { + TCA_NETEM_UNSPEC, + TCA_NETEM_CORR, + TCA_NETEM_DELAY_DIST, + TCA_NETEM_REORDER, 
+ TCA_NETEM_CORRUPT, + TCA_NETEM_LOSS, + TCA_NETEM_RATE, + TCA_NETEM_ECN, + TCA_NETEM_RATE64, + TCA_NETEM_PAD, + TCA_NETEM_LATENCY64, + TCA_NETEM_JITTER64, + TCA_NETEM_SLOT, + TCA_NETEM_SLOT_DIST, + __TCA_NETEM_MAX, +}; + + + +struct tc_netem_qopt { + __u32 latency; + __u32 limit; + __u32 loss; + __u32 gap; + __u32 duplicate; + __u32 jitter; +}; + +struct tc_netem_corr { + __u32 delay_corr; + __u32 loss_corr; + __u32 dup_corr; +}; + +struct tc_netem_reorder { + __u32 probability; + __u32 correlation; +}; + +struct tc_netem_corrupt { + __u32 probability; + __u32 correlation; +}; + +struct tc_netem_rate { + __u32 rate; + __s32 packet_overhead; + __u32 cell_size; + __s32 cell_overhead; +}; + +struct tc_netem_slot { + __s64 min_delay; + __s64 max_delay; + __s32 max_packets; + __s32 max_bytes; + __s64 dist_delay; + __s64 dist_jitter; +}; + +enum { + NETEM_LOSS_UNSPEC, + NETEM_LOSS_GI, + NETEM_LOSS_GE, + __NETEM_LOSS_MAX +}; + + + +struct tc_netem_gimodel { + __u32 p13; + __u32 p31; + __u32 p32; + __u32 p14; + __u32 p23; +}; + + +struct tc_netem_gemodel { + __u32 p; + __u32 r; + __u32 h; + __u32 k1; +}; + + + + + + +enum { + TCA_DRR_UNSPEC, + TCA_DRR_QUANTUM, + __TCA_DRR_MAX +}; + + + +struct tc_drr_stats { + __u32 deficit; +}; + + + + + +enum { + TC_MQPRIO_HW_OFFLOAD_NONE, + TC_MQPRIO_HW_OFFLOAD_TCS, + __TC_MQPRIO_HW_OFFLOAD_MAX +}; + + + +enum { + TC_MQPRIO_MODE_DCB, + TC_MQPRIO_MODE_CHANNEL, + __TC_MQPRIO_MODE_MAX +}; + + + +enum { + TC_MQPRIO_SHAPER_DCB, + TC_MQPRIO_SHAPER_BW_RATE, + __TC_MQPRIO_SHAPER_MAX +}; + + + +struct tc_mqprio_qopt { + __u8 num_tc; + __u8 prio_tc_map[15 + 1]; + __u8 hw; + __u16 count[16]; + __u16 offset[16]; +}; + + + + + + +enum { + TCA_MQPRIO_UNSPEC, + TCA_MQPRIO_MODE, + TCA_MQPRIO_SHAPER, + TCA_MQPRIO_MIN_RATE64, + TCA_MQPRIO_MAX_RATE64, + __TCA_MQPRIO_MAX, +}; + + + + + +enum { + TCA_SFB_UNSPEC, + TCA_SFB_PARMS, + __TCA_SFB_MAX, +}; + + + + + + +struct tc_sfb_qopt { + __u32 rehash_interval; + __u32 warmup_time; + __u32 max; + __u32 bin_size; + __u32 increment; + __u32 decrement; + __u32 limit; + __u32 penalty_rate; + __u32 penalty_burst; +}; + +struct tc_sfb_xstats { + __u32 earlydrop; + __u32 penaltydrop; + __u32 bucketdrop; + __u32 queuedrop; + __u32 childdrop; + __u32 marked; + __u32 maxqlen; + __u32 maxprob; + __u32 avgprob; +}; + + + + +enum { + TCA_QFQ_UNSPEC, + TCA_QFQ_WEIGHT, + TCA_QFQ_LMAX, + __TCA_QFQ_MAX +}; + + + +struct tc_qfq_stats { + __u32 weight; + __u32 lmax; +}; + + + +enum { + TCA_CODEL_UNSPEC, + TCA_CODEL_TARGET, + TCA_CODEL_LIMIT, + TCA_CODEL_INTERVAL, + TCA_CODEL_ECN, + TCA_CODEL_CE_THRESHOLD, + __TCA_CODEL_MAX +}; + + + +struct tc_codel_xstats { + __u32 maxpacket; + __u32 count; + + + __u32 lastcount; + __u32 ldelay; + __s32 drop_next; + __u32 drop_overlimit; + __u32 ecn_mark; + __u32 dropping; + __u32 ce_mark; +}; + + + +enum { + TCA_FQ_CODEL_UNSPEC, + TCA_FQ_CODEL_TARGET, + TCA_FQ_CODEL_LIMIT, + TCA_FQ_CODEL_INTERVAL, + TCA_FQ_CODEL_ECN, + TCA_FQ_CODEL_FLOWS, + TCA_FQ_CODEL_QUANTUM, + TCA_FQ_CODEL_CE_THRESHOLD, + TCA_FQ_CODEL_DROP_BATCH_SIZE, + TCA_FQ_CODEL_MEMORY_LIMIT, + __TCA_FQ_CODEL_MAX +}; + + + +enum { + TCA_FQ_CODEL_XSTATS_QDISC, + TCA_FQ_CODEL_XSTATS_CLASS, +}; + +struct tc_fq_codel_qd_stats { + __u32 maxpacket; + __u32 drop_overlimit; + + + __u32 ecn_mark; + + + __u32 new_flow_count; + + + __u32 new_flows_len; + __u32 old_flows_len; + __u32 ce_mark; + __u32 memory_usage; + __u32 drop_overmemory; +}; + +struct tc_fq_codel_cl_stats { + __s32 deficit; + __u32 ldelay; + + + __u32 count; + __u32 lastcount; + __u32 dropping; + __s32 
drop_next; +}; + +struct tc_fq_codel_xstats { + __u32 type; + union { + struct tc_fq_codel_qd_stats qdisc_stats; + struct tc_fq_codel_cl_stats class_stats; + }; +}; + + + +enum { + TCA_FQ_UNSPEC, + + TCA_FQ_PLIMIT, + + TCA_FQ_FLOW_PLIMIT, + + TCA_FQ_QUANTUM, + + TCA_FQ_INITIAL_QUANTUM, + + TCA_FQ_RATE_ENABLE, + + TCA_FQ_FLOW_DEFAULT_RATE, + + TCA_FQ_FLOW_MAX_RATE, + + TCA_FQ_BUCKETS_LOG, + + TCA_FQ_FLOW_REFILL_DELAY, + + TCA_FQ_ORPHAN_MASK, + + TCA_FQ_LOW_RATE_THRESHOLD, + + TCA_FQ_CE_THRESHOLD, + + TCA_FQ_TIMER_SLACK, + + TCA_FQ_HORIZON, + + TCA_FQ_HORIZON_DROP, + + __TCA_FQ_MAX +}; + + + +struct tc_fq_qd_stats { + __u64 gc_flows; + __u64 highprio_packets; + __u64 tcp_retrans; + __u64 throttled; + __u64 flows_plimit; + __u64 pkts_too_long; + __u64 allocation_errors; + __s64 time_next_delayed_flow; + __u32 flows; + __u32 inactive_flows; + __u32 throttled_flows; + __u32 unthrottle_latency_ns; + __u64 ce_mark; + __u64 horizon_drops; + __u64 horizon_caps; +}; + + + +enum { + TCA_HHF_UNSPEC, + TCA_HHF_BACKLOG_LIMIT, + TCA_HHF_QUANTUM, + TCA_HHF_HH_FLOWS_LIMIT, + TCA_HHF_RESET_TIMEOUT, + TCA_HHF_ADMIT_BYTES, + TCA_HHF_EVICT_TIMEOUT, + TCA_HHF_NON_HH_WEIGHT, + __TCA_HHF_MAX +}; + + + +struct tc_hhf_xstats { + __u32 drop_overlimit; + + + __u32 hh_overlimit; + __u32 hh_tot_count; + __u32 hh_cur_count; +}; + + +enum { + TCA_PIE_UNSPEC, + TCA_PIE_TARGET, + TCA_PIE_LIMIT, + TCA_PIE_TUPDATE, + TCA_PIE_ALPHA, + TCA_PIE_BETA, + TCA_PIE_ECN, + TCA_PIE_BYTEMODE, + TCA_PIE_DQ_RATE_ESTIMATOR, + __TCA_PIE_MAX +}; + + +struct tc_pie_xstats { + __u64 prob; + __u32 delay; + __u32 avg_dq_rate; + + + __u32 dq_rate_estimating; + __u32 packets_in; + __u32 dropped; + __u32 overlimit; + + + __u32 maxq; + __u32 ecn_mark; +}; + + +enum { + TCA_FQ_PIE_UNSPEC, + TCA_FQ_PIE_LIMIT, + TCA_FQ_PIE_FLOWS, + TCA_FQ_PIE_TARGET, + TCA_FQ_PIE_TUPDATE, + TCA_FQ_PIE_ALPHA, + TCA_FQ_PIE_BETA, + TCA_FQ_PIE_QUANTUM, + TCA_FQ_PIE_MEMORY_LIMIT, + TCA_FQ_PIE_ECN_PROB, + TCA_FQ_PIE_ECN, + TCA_FQ_PIE_BYTEMODE, + TCA_FQ_PIE_DQ_RATE_ESTIMATOR, + __TCA_FQ_PIE_MAX +}; + + +struct tc_fq_pie_xstats { + __u32 packets_in; + __u32 dropped; + __u32 overlimit; + __u32 overmemory; + __u32 ecn_mark; + __u32 new_flow_count; + __u32 new_flows_len; + __u32 old_flows_len; + __u32 memory_usage; +}; + + +struct tc_cbs_qopt { + __u8 offload; + __u8 _pad[3]; + __s32 hicredit; + __s32 locredit; + __s32 idleslope; + __s32 sendslope; +}; + +enum { + TCA_CBS_UNSPEC, + TCA_CBS_PARMS, + __TCA_CBS_MAX, +}; + + + + + +struct tc_etf_qopt { + __s32 delta; + __s32 clockid; + __u32 flags; + + + +}; + +enum { + TCA_ETF_UNSPEC, + TCA_ETF_PARMS, + __TCA_ETF_MAX, +}; + + + + + +enum { + TCA_CAKE_UNSPEC, + TCA_CAKE_PAD, + TCA_CAKE_BASE_RATE64, + TCA_CAKE_DIFFSERV_MODE, + TCA_CAKE_ATM, + TCA_CAKE_FLOW_MODE, + TCA_CAKE_OVERHEAD, + TCA_CAKE_RTT, + TCA_CAKE_TARGET, + TCA_CAKE_AUTORATE, + TCA_CAKE_MEMORY, + TCA_CAKE_NAT, + TCA_CAKE_RAW, + TCA_CAKE_WASH, + TCA_CAKE_MPU, + TCA_CAKE_INGRESS, + TCA_CAKE_ACK_FILTER, + TCA_CAKE_SPLIT_GSO, + TCA_CAKE_FWMARK, + __TCA_CAKE_MAX +}; + + +enum { + __TCA_CAKE_STATS_INVALID, + TCA_CAKE_STATS_PAD, + TCA_CAKE_STATS_CAPACITY_ESTIMATE64, + TCA_CAKE_STATS_MEMORY_LIMIT, + TCA_CAKE_STATS_MEMORY_USED, + TCA_CAKE_STATS_AVG_NETOFF, + TCA_CAKE_STATS_MIN_NETLEN, + TCA_CAKE_STATS_MAX_NETLEN, + TCA_CAKE_STATS_MIN_ADJLEN, + TCA_CAKE_STATS_MAX_ADJLEN, + TCA_CAKE_STATS_TIN_STATS, + TCA_CAKE_STATS_DEFICIT, + TCA_CAKE_STATS_COBALT_COUNT, + TCA_CAKE_STATS_DROPPING, + TCA_CAKE_STATS_DROP_NEXT_US, + TCA_CAKE_STATS_P_DROP, + TCA_CAKE_STATS_BLUE_TIMER_US, + 
__TCA_CAKE_STATS_MAX +}; + + +enum { + __TCA_CAKE_TIN_STATS_INVALID, + TCA_CAKE_TIN_STATS_PAD, + TCA_CAKE_TIN_STATS_SENT_PACKETS, + TCA_CAKE_TIN_STATS_SENT_BYTES64, + TCA_CAKE_TIN_STATS_DROPPED_PACKETS, + TCA_CAKE_TIN_STATS_DROPPED_BYTES64, + TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS, + TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64, + TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS, + TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64, + TCA_CAKE_TIN_STATS_BACKLOG_PACKETS, + TCA_CAKE_TIN_STATS_BACKLOG_BYTES, + TCA_CAKE_TIN_STATS_THRESHOLD_RATE64, + TCA_CAKE_TIN_STATS_TARGET_US, + TCA_CAKE_TIN_STATS_INTERVAL_US, + TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS, + TCA_CAKE_TIN_STATS_WAY_MISSES, + TCA_CAKE_TIN_STATS_WAY_COLLISIONS, + TCA_CAKE_TIN_STATS_PEAK_DELAY_US, + TCA_CAKE_TIN_STATS_AVG_DELAY_US, + TCA_CAKE_TIN_STATS_BASE_DELAY_US, + TCA_CAKE_TIN_STATS_SPARSE_FLOWS, + TCA_CAKE_TIN_STATS_BULK_FLOWS, + TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS, + TCA_CAKE_TIN_STATS_MAX_SKBLEN, + TCA_CAKE_TIN_STATS_FLOW_QUANTUM, + __TCA_CAKE_TIN_STATS_MAX +}; + + + +enum { + CAKE_FLOW_NONE = 0, + CAKE_FLOW_SRC_IP, + CAKE_FLOW_DST_IP, + CAKE_FLOW_HOSTS, + CAKE_FLOW_FLOWS, + CAKE_FLOW_DUAL_SRC, + CAKE_FLOW_DUAL_DST, + CAKE_FLOW_TRIPLE, + CAKE_FLOW_MAX, +}; + +enum { + CAKE_DIFFSERV_DIFFSERV3 = 0, + CAKE_DIFFSERV_DIFFSERV4, + CAKE_DIFFSERV_DIFFSERV8, + CAKE_DIFFSERV_BESTEFFORT, + CAKE_DIFFSERV_PRECEDENCE, + CAKE_DIFFSERV_MAX +}; + +enum { + CAKE_ACK_NONE = 0, + CAKE_ACK_FILTER, + CAKE_ACK_AGGRESSIVE, + CAKE_ACK_MAX +}; + +enum { + CAKE_ATM_NONE = 0, + CAKE_ATM_ATM, + CAKE_ATM_PTM, + CAKE_ATM_MAX +}; + + + +enum { + TC_TAPRIO_CMD_SET_GATES = 0x00, + TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, + TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, +}; + +enum { + TCA_TAPRIO_SCHED_ENTRY_UNSPEC, + TCA_TAPRIO_SCHED_ENTRY_INDEX, + TCA_TAPRIO_SCHED_ENTRY_CMD, + TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, + TCA_TAPRIO_SCHED_ENTRY_INTERVAL, + __TCA_TAPRIO_SCHED_ENTRY_MAX, +}; +# 1207 "./include/uapi/linux/pkt_sched.h" +enum { + TCA_TAPRIO_SCHED_UNSPEC, + TCA_TAPRIO_SCHED_ENTRY, + __TCA_TAPRIO_SCHED_MAX, +}; +# 1228 "./include/uapi/linux/pkt_sched.h" +enum { + TCA_TAPRIO_ATTR_UNSPEC, + TCA_TAPRIO_ATTR_PRIOMAP, + TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, + TCA_TAPRIO_ATTR_SCHED_BASE_TIME, + TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, + TCA_TAPRIO_ATTR_SCHED_CLOCKID, + TCA_TAPRIO_PAD, + TCA_TAPRIO_ATTR_ADMIN_SCHED, + TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, + TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, + TCA_TAPRIO_ATTR_FLAGS, + TCA_TAPRIO_ATTR_TXTIME_DELAY, + __TCA_TAPRIO_ATTR_MAX, +}; + + + + + + + +enum { + TCA_ETS_UNSPEC, + TCA_ETS_NBANDS, + TCA_ETS_NSTRICT, + TCA_ETS_QUANTA, + TCA_ETS_QUANTA_BAND, + TCA_ETS_PRIOMAP, + TCA_ETS_PRIOMAP_BAND, + __TCA_ETS_MAX, +}; +# 7 "./include/uapi/linux/pkt_cls.h" 2 + + + + +enum { + TCA_ACT_UNSPEC, + TCA_ACT_KIND, + TCA_ACT_OPTIONS, + TCA_ACT_INDEX, + TCA_ACT_STATS, + TCA_ACT_PAD, + TCA_ACT_COOKIE, + TCA_ACT_FLAGS, + TCA_ACT_HW_STATS, + TCA_ACT_USED_HW_STATS, + __TCA_ACT_MAX +}; +# 115 "./include/uapi/linux/pkt_cls.h" +enum tca_id { + TCA_ID_UNSPEC = 0, + TCA_ID_POLICE = 1, + TCA_ID_GACT = 5, + TCA_ID_IPT = 6, + TCA_ID_PEDIT = 7, + TCA_ID_MIRRED = 8, + TCA_ID_NAT = 9, + TCA_ID_XT = 10, + TCA_ID_SKBEDIT = 11, + TCA_ID_VLAN = 12, + TCA_ID_BPF = 13, + TCA_ID_CONNMARK = 14, + TCA_ID_SKBMOD = 15, + TCA_ID_CSUM = 16, + TCA_ID_TUNNEL_KEY = 17, + TCA_ID_SIMP = 22, + TCA_ID_IFE = 25, + TCA_ID_SAMPLE = 26, + TCA_ID_CTINFO, + TCA_ID_MPLS, + TCA_ID_CT, + TCA_ID_GATE, + + __TCA_ID_MAX = 255 +}; + + + +struct tc_police { + __u32 index; + int action; + + + + + + + __u32 limit; + __u32 burst; + 
__u32 mtu; + struct tc_ratespec rate; + struct tc_ratespec peakrate; + int refcnt; + int bindcnt; + __u32 capab; +}; + +struct tcf_t { + __u64 install; + __u64 lastuse; + __u64 expires; + __u64 firstuse; +}; + +struct tc_cnt { + int refcnt; + int bindcnt; +}; +# 182 "./include/uapi/linux/pkt_cls.h" +enum { + TCA_POLICE_UNSPEC, + TCA_POLICE_TBF, + TCA_POLICE_RATE, + TCA_POLICE_PEAKRATE, + TCA_POLICE_AVRATE, + TCA_POLICE_RESULT, + TCA_POLICE_TM, + TCA_POLICE_PAD, + TCA_POLICE_RATE64, + TCA_POLICE_PEAKRATE64, + __TCA_POLICE_MAX + +}; +# 216 "./include/uapi/linux/pkt_cls.h" +enum { + TCA_U32_UNSPEC, + TCA_U32_CLASSID, + TCA_U32_HASH, + TCA_U32_LINK, + TCA_U32_DIVISOR, + TCA_U32_SEL, + TCA_U32_POLICE, + TCA_U32_ACT, + TCA_U32_INDEV, + TCA_U32_PCNT, + TCA_U32_MARK, + TCA_U32_FLAGS, + TCA_U32_PAD, + __TCA_U32_MAX +}; + + + +struct tc_u32_key { + __be32 mask; + __be32 val; + int off; + int offmask; +}; + +struct tc_u32_sel { + unsigned char flags; + unsigned char offshift; + unsigned char nkeys; + + __be16 offmask; + __u16 off; + short offoff; + + short hoff; + __be32 hmask; + struct tc_u32_key keys[0]; +}; + +struct tc_u32_mark { + __u32 val; + __u32 mask; + __u32 success; +}; + +struct tc_u32_pcnt { + __u64 rcnt; + __u64 rhit; + __u64 kcnts[0]; +}; +# 280 "./include/uapi/linux/pkt_cls.h" +enum { + TCA_RSVP_UNSPEC, + TCA_RSVP_CLASSID, + TCA_RSVP_DST, + TCA_RSVP_SRC, + TCA_RSVP_PINFO, + TCA_RSVP_POLICE, + TCA_RSVP_ACT, + __TCA_RSVP_MAX +}; + + + +struct tc_rsvp_gpi { + __u32 key; + __u32 mask; + int offset; +}; + +struct tc_rsvp_pinfo { + struct tc_rsvp_gpi dpi; + struct tc_rsvp_gpi spi; + __u8 protocol; + __u8 tunnelid; + __u8 tunnelhdr; + __u8 pad; +}; + + + +enum { + TCA_ROUTE4_UNSPEC, + TCA_ROUTE4_CLASSID, + TCA_ROUTE4_TO, + TCA_ROUTE4_FROM, + TCA_ROUTE4_IIF, + TCA_ROUTE4_POLICE, + TCA_ROUTE4_ACT, + __TCA_ROUTE4_MAX +}; + + + + + + +enum { + TCA_FW_UNSPEC, + TCA_FW_CLASSID, + TCA_FW_POLICE, + TCA_FW_INDEV, + TCA_FW_ACT, + TCA_FW_MASK, + __TCA_FW_MAX +}; + + + + + +enum { + TCA_TCINDEX_UNSPEC, + TCA_TCINDEX_HASH, + TCA_TCINDEX_MASK, + TCA_TCINDEX_SHIFT, + TCA_TCINDEX_FALL_THROUGH, + TCA_TCINDEX_CLASSID, + TCA_TCINDEX_POLICE, + TCA_TCINDEX_ACT, + __TCA_TCINDEX_MAX +}; + + + + + +enum { + FLOW_KEY_SRC, + FLOW_KEY_DST, + FLOW_KEY_PROTO, + FLOW_KEY_PROTO_SRC, + FLOW_KEY_PROTO_DST, + FLOW_KEY_IIF, + FLOW_KEY_PRIORITY, + FLOW_KEY_MARK, + FLOW_KEY_NFCT, + FLOW_KEY_NFCT_SRC, + FLOW_KEY_NFCT_DST, + FLOW_KEY_NFCT_PROTO_SRC, + FLOW_KEY_NFCT_PROTO_DST, + FLOW_KEY_RTCLASSID, + FLOW_KEY_SKUID, + FLOW_KEY_SKGID, + FLOW_KEY_VLAN_TAG, + FLOW_KEY_RXHASH, + __FLOW_KEY_MAX, +}; + + + +enum { + FLOW_MODE_MAP, + FLOW_MODE_HASH, +}; + +enum { + TCA_FLOW_UNSPEC, + TCA_FLOW_KEYS, + TCA_FLOW_MODE, + TCA_FLOW_BASECLASS, + TCA_FLOW_RSHIFT, + TCA_FLOW_ADDEND, + TCA_FLOW_MASK, + TCA_FLOW_XOR, + TCA_FLOW_DIVISOR, + TCA_FLOW_ACT, + TCA_FLOW_POLICE, + TCA_FLOW_EMATCHES, + TCA_FLOW_PERTURB, + __TCA_FLOW_MAX +}; + + + + + +struct tc_basic_pcnt { + __u64 rcnt; + __u64 rhit; +}; + +enum { + TCA_BASIC_UNSPEC, + TCA_BASIC_CLASSID, + TCA_BASIC_EMATCHES, + TCA_BASIC_ACT, + TCA_BASIC_POLICE, + TCA_BASIC_PCNT, + TCA_BASIC_PAD, + __TCA_BASIC_MAX +}; + + + + + + +enum { + TCA_CGROUP_UNSPEC, + TCA_CGROUP_ACT, + TCA_CGROUP_POLICE, + TCA_CGROUP_EMATCHES, + __TCA_CGROUP_MAX, +}; + + + + + + + +enum { + TCA_BPF_UNSPEC, + TCA_BPF_ACT, + TCA_BPF_POLICE, + TCA_BPF_CLASSID, + TCA_BPF_OPS_LEN, + TCA_BPF_OPS, + TCA_BPF_FD, + TCA_BPF_NAME, + TCA_BPF_FLAGS, + TCA_BPF_FLAGS_GEN, + TCA_BPF_TAG, + TCA_BPF_ID, + __TCA_BPF_MAX, +}; + + + + + +enum { + 
TCA_FLOWER_UNSPEC, + TCA_FLOWER_CLASSID, + TCA_FLOWER_INDEV, + TCA_FLOWER_ACT, + TCA_FLOWER_KEY_ETH_DST, + TCA_FLOWER_KEY_ETH_DST_MASK, + TCA_FLOWER_KEY_ETH_SRC, + TCA_FLOWER_KEY_ETH_SRC_MASK, + TCA_FLOWER_KEY_ETH_TYPE, + TCA_FLOWER_KEY_IP_PROTO, + TCA_FLOWER_KEY_IPV4_SRC, + TCA_FLOWER_KEY_IPV4_SRC_MASK, + TCA_FLOWER_KEY_IPV4_DST, + TCA_FLOWER_KEY_IPV4_DST_MASK, + TCA_FLOWER_KEY_IPV6_SRC, + TCA_FLOWER_KEY_IPV6_SRC_MASK, + TCA_FLOWER_KEY_IPV6_DST, + TCA_FLOWER_KEY_IPV6_DST_MASK, + TCA_FLOWER_KEY_TCP_SRC, + TCA_FLOWER_KEY_TCP_DST, + TCA_FLOWER_KEY_UDP_SRC, + TCA_FLOWER_KEY_UDP_DST, + + TCA_FLOWER_FLAGS, + TCA_FLOWER_KEY_VLAN_ID, + TCA_FLOWER_KEY_VLAN_PRIO, + TCA_FLOWER_KEY_VLAN_ETH_TYPE, + + TCA_FLOWER_KEY_ENC_KEY_ID, + TCA_FLOWER_KEY_ENC_IPV4_SRC, + TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, + TCA_FLOWER_KEY_ENC_IPV4_DST, + TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, + TCA_FLOWER_KEY_ENC_IPV6_SRC, + TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, + TCA_FLOWER_KEY_ENC_IPV6_DST, + TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, + + TCA_FLOWER_KEY_TCP_SRC_MASK, + TCA_FLOWER_KEY_TCP_DST_MASK, + TCA_FLOWER_KEY_UDP_SRC_MASK, + TCA_FLOWER_KEY_UDP_DST_MASK, + TCA_FLOWER_KEY_SCTP_SRC_MASK, + TCA_FLOWER_KEY_SCTP_DST_MASK, + + TCA_FLOWER_KEY_SCTP_SRC, + TCA_FLOWER_KEY_SCTP_DST, + + TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, + TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, + TCA_FLOWER_KEY_ENC_UDP_DST_PORT, + TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, + + TCA_FLOWER_KEY_FLAGS, + TCA_FLOWER_KEY_FLAGS_MASK, + + TCA_FLOWER_KEY_ICMPV4_CODE, + TCA_FLOWER_KEY_ICMPV4_CODE_MASK, + TCA_FLOWER_KEY_ICMPV4_TYPE, + TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, + TCA_FLOWER_KEY_ICMPV6_CODE, + TCA_FLOWER_KEY_ICMPV6_CODE_MASK, + TCA_FLOWER_KEY_ICMPV6_TYPE, + TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, + + TCA_FLOWER_KEY_ARP_SIP, + TCA_FLOWER_KEY_ARP_SIP_MASK, + TCA_FLOWER_KEY_ARP_TIP, + TCA_FLOWER_KEY_ARP_TIP_MASK, + TCA_FLOWER_KEY_ARP_OP, + TCA_FLOWER_KEY_ARP_OP_MASK, + TCA_FLOWER_KEY_ARP_SHA, + TCA_FLOWER_KEY_ARP_SHA_MASK, + TCA_FLOWER_KEY_ARP_THA, + TCA_FLOWER_KEY_ARP_THA_MASK, + + TCA_FLOWER_KEY_MPLS_TTL, + TCA_FLOWER_KEY_MPLS_BOS, + TCA_FLOWER_KEY_MPLS_TC, + TCA_FLOWER_KEY_MPLS_LABEL, + + TCA_FLOWER_KEY_TCP_FLAGS, + TCA_FLOWER_KEY_TCP_FLAGS_MASK, + + TCA_FLOWER_KEY_IP_TOS, + TCA_FLOWER_KEY_IP_TOS_MASK, + TCA_FLOWER_KEY_IP_TTL, + TCA_FLOWER_KEY_IP_TTL_MASK, + + TCA_FLOWER_KEY_CVLAN_ID, + TCA_FLOWER_KEY_CVLAN_PRIO, + TCA_FLOWER_KEY_CVLAN_ETH_TYPE, + + TCA_FLOWER_KEY_ENC_IP_TOS, + TCA_FLOWER_KEY_ENC_IP_TOS_MASK, + TCA_FLOWER_KEY_ENC_IP_TTL, + TCA_FLOWER_KEY_ENC_IP_TTL_MASK, + + TCA_FLOWER_KEY_ENC_OPTS, + TCA_FLOWER_KEY_ENC_OPTS_MASK, + + TCA_FLOWER_IN_HW_COUNT, + + TCA_FLOWER_KEY_PORT_SRC_MIN, + TCA_FLOWER_KEY_PORT_SRC_MAX, + TCA_FLOWER_KEY_PORT_DST_MIN, + TCA_FLOWER_KEY_PORT_DST_MAX, + + TCA_FLOWER_KEY_CT_STATE, + TCA_FLOWER_KEY_CT_STATE_MASK, + TCA_FLOWER_KEY_CT_ZONE, + TCA_FLOWER_KEY_CT_ZONE_MASK, + TCA_FLOWER_KEY_CT_MARK, + TCA_FLOWER_KEY_CT_MARK_MASK, + TCA_FLOWER_KEY_CT_LABELS, + TCA_FLOWER_KEY_CT_LABELS_MASK, + + TCA_FLOWER_KEY_MPLS_OPTS, + + __TCA_FLOWER_MAX, +}; + + + +enum { + TCA_FLOWER_KEY_CT_FLAGS_NEW = 1 << 0, + TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, + TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, + TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, +}; + +enum { + TCA_FLOWER_KEY_ENC_OPTS_UNSPEC, + TCA_FLOWER_KEY_ENC_OPTS_GENEVE, + + + + TCA_FLOWER_KEY_ENC_OPTS_VXLAN, + + + + TCA_FLOWER_KEY_ENC_OPTS_ERSPAN, + + + + __TCA_FLOWER_KEY_ENC_OPTS_MAX, +}; + + + +enum { + TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, + TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, + 
TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, + + __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, +}; + + + + +enum { + TCA_FLOWER_KEY_ENC_OPT_VXLAN_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, + __TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, +}; + + + + +enum { + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, + __TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, +}; + + + + +enum { + TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC, + TCA_FLOWER_KEY_MPLS_OPTS_LSE, + __TCA_FLOWER_KEY_MPLS_OPTS_MAX, +}; + + + +enum { + TCA_FLOWER_KEY_MPLS_OPT_LSE_UNSPEC, + TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, + TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, + TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, + TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, + TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, + __TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, +}; + + + + +enum { + TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0), + TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1), +}; + + + + + +struct tc_matchall_pcnt { + __u64 rhit; +}; + +enum { + TCA_MATCHALL_UNSPEC, + TCA_MATCHALL_CLASSID, + TCA_MATCHALL_ACT, + TCA_MATCHALL_FLAGS, + TCA_MATCHALL_PCNT, + TCA_MATCHALL_PAD, + __TCA_MATCHALL_MAX, +}; + + + + + +struct tcf_ematch_tree_hdr { + __u16 nmatches; + __u16 progid; +}; + +enum { + TCA_EMATCH_TREE_UNSPEC, + TCA_EMATCH_TREE_HDR, + TCA_EMATCH_TREE_LIST, + __TCA_EMATCH_TREE_MAX +}; + + +struct tcf_ematch_hdr { + __u16 matchid; + __u16 kind; + __u16 flags; + __u16 pad; +}; +# 736 "./include/uapi/linux/pkt_cls.h" +enum { + TCF_LAYER_LINK, + TCF_LAYER_NETWORK, + TCF_LAYER_TRANSPORT, + __TCF_LAYER_MAX +}; +# 760 "./include/uapi/linux/pkt_cls.h" +enum { + TCF_EM_PROG_TC +}; + +enum { + TCF_EM_OPND_EQ, + TCF_EM_OPND_GT, + TCF_EM_OPND_LT +}; +# 50 "./include/linux/netdevice.h" 2 + + +struct netpoll_info; +struct device; +struct phy_device; +struct dsa_port; +struct ip_tunnel_parm; +struct macsec_context; +struct macsec_ops; + +struct sfp_bus; + +struct wireless_dev; + +struct wpan_dev; +struct mpls_dev; + +struct udp_tunnel_info; +struct bpf_prog; +struct xdp_buff; + +void netdev_set_default_ethtool_ops(struct net_device *dev, + const struct ethtool_ops *ops); +# 112 "./include/linux/netdevice.h" +enum netdev_tx { + __NETDEV_TX_MIN = (-((int)(~0U >> 1)) - 1), + NETDEV_TX_OK = 0x00, + NETDEV_TX_BUSY = 0x10, +}; +typedef enum netdev_tx netdev_tx_t; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dev_xmit_complete(int rc) +{ + + + + + + + if (__builtin_expect(!!(rc < 0x0f), 1)) + return true; + + return false; +} +# 166 "./include/linux/netdevice.h" +struct net_device_stats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long rx_errors; + unsigned long tx_errors; + unsigned long rx_dropped; + unsigned long tx_dropped; + unsigned long multicast; + unsigned long collisions; + unsigned long rx_length_errors; + unsigned long rx_over_errors; + unsigned long rx_crc_errors; + unsigned long rx_frame_errors; + unsigned long rx_fifo_errors; + unsigned long rx_missed_errors; + unsigned long tx_aborted_errors; + unsigned long tx_carrier_errors; + unsigned long tx_fifo_errors; + unsigned long tx_heartbeat_errors; + unsigned long tx_window_errors; + unsigned long rx_compressed; + unsigned long tx_compressed; +}; + + + + + + +# 1 "./include/linux/static_key.h" 1 +# 198 "./include/linux/netdevice.h" 2 +extern struct static_key_false rps_needed; +extern struct static_key_false 
rfs_needed; + + +struct neighbour; +struct neigh_parms; +struct sk_buff; + +struct netdev_hw_addr { + struct list_head list; + unsigned char addr[32]; + unsigned char type; + + + + + + bool global_use; + int sync_cnt; + int refcount; + int synced; + struct callback_head callback_head; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; +}; +# 242 "./include/linux/netdevice.h" +struct hh_cache { + unsigned int hh_len; + seqlock_t hh_lock; + + + + + + + + unsigned long hh_data[(((128)+(16 -1))&~(16 - 1)) / sizeof(long)]; +}; +# 268 "./include/linux/netdevice.h" +struct header_ops { + int (*create) (struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned int len); + int (*parse)(const struct sk_buff *skb, unsigned char *haddr); + int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); + void (*cache_update)(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char *haddr); + bool (*validate)(const char *ll_header, unsigned int len); + __be16 (*parse_protocol)(const struct sk_buff *skb); +}; + + + + + + +enum netdev_state_t { + __LINK_STATE_START, + __LINK_STATE_PRESENT, + __LINK_STATE_NOCARRIER, + __LINK_STATE_LINKWATCH_PENDING, + __LINK_STATE_DORMANT, + __LINK_STATE_TESTING, +}; + + + + + + +struct netdev_boot_setup { + char name[16]; + struct ifmap map; +}; + + +int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) netdev_boot_setup(char *str); + +struct gro_list { + struct list_head list; + int count; +}; +# 322 "./include/linux/netdevice.h" +struct napi_struct { + + + + + + + struct list_head poll_list; + + unsigned long state; + int weight; + int defer_hard_irqs_count; + unsigned long gro_bitmask; + int (*poll)(struct napi_struct *, int); + + int poll_owner; + + struct net_device *dev; + struct gro_list gro_hash[8]; + struct sk_buff *skb; + struct list_head rx_list; + int rx_count; + struct hrtimer timer; + struct list_head dev_list; + struct hlist_node napi_hash_node; + unsigned int napi_id; +}; + +enum { + NAPI_STATE_SCHED, + NAPI_STATE_MISSED, + NAPI_STATE_DISABLE, + NAPI_STATE_NPSVC, + NAPI_STATE_HASHED, + NAPI_STATE_NO_BUSY_POLL, + NAPI_STATE_IN_BUSY_POLL, +}; + +enum { + NAPIF_STATE_SCHED = ((((1UL))) << (NAPI_STATE_SCHED)), + NAPIF_STATE_MISSED = ((((1UL))) << (NAPI_STATE_MISSED)), + NAPIF_STATE_DISABLE = ((((1UL))) << (NAPI_STATE_DISABLE)), + NAPIF_STATE_NPSVC = ((((1UL))) << (NAPI_STATE_NPSVC)), + NAPIF_STATE_HASHED = ((((1UL))) << (NAPI_STATE_HASHED)), + NAPIF_STATE_NO_BUSY_POLL = ((((1UL))) << (NAPI_STATE_NO_BUSY_POLL)), + NAPIF_STATE_IN_BUSY_POLL = ((((1UL))) << (NAPI_STATE_IN_BUSY_POLL)), +}; + +enum gro_result { + GRO_MERGED, + GRO_MERGED_FREE, + GRO_HELD, + GRO_NORMAL, + GRO_DROP, + GRO_CONSUMED, +}; +typedef enum gro_result gro_result_t; +# 421 "./include/linux/netdevice.h" +enum rx_handler_result { + RX_HANDLER_CONSUMED, + RX_HANDLER_ANOTHER, + RX_HANDLER_EXACT, + RX_HANDLER_PASS, +}; +typedef enum rx_handler_result rx_handler_result_t; +typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); + +void __napi_schedule(struct napi_struct *n); +void __napi_schedule_irqoff(struct napi_struct *n); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool napi_disable_pending(struct napi_struct *n) +{ + return test_bit(NAPI_STATE_DISABLE, &n->state); +} + +bool napi_schedule_prep(struct napi_struct *n); +# 447 
"./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void napi_schedule(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule(n); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void napi_schedule_irqoff(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule_irqoff(n); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool napi_reschedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) { + __napi_schedule(napi); + return true; + } + return false; +} + +bool napi_complete_done(struct napi_struct *n, int work_done); +# 484 "./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool napi_complete(struct napi_struct *n) +{ + return napi_complete_done(n, 0); +} +# 501 "./include/linux/netdevice.h" +bool napi_hash_del(struct napi_struct *napi); +# 510 "./include/linux/netdevice.h" +void napi_disable(struct napi_struct *n); +# 519 "./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void napi_enable(struct napi_struct *n) +{ + do { if (__builtin_expect(!!(!test_bit(NAPI_STATE_SCHED, &n->state)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1223)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/netdevice.h"), "i" (521), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1224)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + do { } while (0); + clear_bit(NAPI_STATE_SCHED, &n->state); + clear_bit(NAPI_STATE_NPSVC, &n->state); +} +# 535 "./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void napi_synchronize(const struct napi_struct *n) +{ + if (1) + while (test_bit(NAPI_STATE_SCHED, &n->state)) + msleep(1); + else + __asm__ __volatile__("": : :"memory"); +} +# 552 "./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = ({ do { extern void __compiletime_assert_1225(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->state) == sizeof(char) || sizeof(n->state) == sizeof(short) || sizeof(n->state) == sizeof(int) || sizeof(n->state) == sizeof(long)) || sizeof(n->state) == sizeof(long long))) __compiletime_assert_1225(); } while (0); ({ typeof( _Generic((n->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: 
(unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (n->state))) __x = (*(const volatile typeof( _Generic((n->state), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (n->state))) *)&(n->state)); do { } while (0); (typeof(n->state))__x; }); }); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (({ typeof(&n->state) __ai_ptr = (&n->state); instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); ({ __typeof__(*((__ai_ptr))) __ret; __typeof__(*((__ai_ptr))) __old = ((val)); __typeof__(*((__ai_ptr))) __new = ((new)); switch ((sizeof(*(__ai_ptr)))) { case 1: { volatile u8 *__ptr = (volatile u8 *)((__ai_ptr)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((__ai_ptr)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((__ai_ptr)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 8: { volatile u64 *__ptr = (volatile u64 *)((__ai_ptr)); asm volatile(".pushsection .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".popsection\n" "671:" "\n\tlock; " "cmpxchgq %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; }); }) != val); + + return true; +} + +enum netdev_queue_state_t { + __QUEUE_STATE_DRV_XOFF, + __QUEUE_STATE_STACK_XOFF, + __QUEUE_STATE_FROZEN, +}; +# 596 "./include/linux/netdevice.h" +struct netdev_queue { + + + + struct net_device *dev; + struct Qdisc *qdisc; + struct Qdisc *qdisc_sleeping; + + struct kobject kobj; + + + int numa_node; + + unsigned long tx_maxrate; + + + + + unsigned long trans_timeout; + + + struct net_device *sb_dev; + + struct xdp_umem *umem; + + + + + spinlock_t _xmit_lock __attribute__((__aligned__((1 << (6))))); + int xmit_lock_owner; + + + + unsigned long trans_start; + + unsigned long state; + + + struct dql dql; + +} __attribute__((__aligned__((1 << (6))))); + +extern int sysctl_fb_tunnels_only_for_init_net; +extern int sysctl_devconf_inherit_init_net; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool net_has_fallback_tunnels(const struct net *net) +{ + return net == &init_net || + !1 || + !sysctl_fb_tunnels_only_for_init_net; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int netdev_queue_numa_node_read(const struct netdev_queue *q) +{ + + return q->numa_node; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
void netdev_queue_numa_node_write(struct netdev_queue *q, int node) +{ + + q->numa_node = node; + +} + + + + + + +struct rps_map { + unsigned int len; + struct callback_head rcu; + u16 cpus[]; +}; + + + + + + + +struct rps_dev_flow { + u16 cpu; + u16 filter; + unsigned int last_qtail; +}; + + + + + +struct rps_dev_flow_table { + unsigned int mask; + struct callback_head rcu; + struct rps_dev_flow flows[]; +}; +# 709 "./include/linux/netdevice.h" +struct rps_sock_flow_table { + u32 mask; + + u32 ents[] __attribute__((__aligned__((1 << (6))))); +}; + + + + +extern u32 rps_cpu_mask; +extern struct rps_sock_flow_table *rps_sock_flow_table; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rps_record_sock_flow(struct rps_sock_flow_table *table, + u32 hash) +{ + if (table && hash) { + unsigned int index = hash & table->mask; + u32 val = hash & ~rps_cpu_mask; + + + val |= ({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); + + if (table->ents[index] != val) + table->ents[index] = val; + } +} + + +bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, + 
u16 filter_id); + + + + +struct netdev_rx_queue { + + struct rps_map *rps_map; + struct rps_dev_flow_table *rps_flow_table; + + struct kobject kobj; + struct net_device *dev; + struct xdp_rxq_info xdp_rxq; + + struct xdp_umem *umem; + +} __attribute__((__aligned__((1 << (6))))); + + + + +struct rx_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); + ssize_t (*store)(struct netdev_rx_queue *queue, + const char *buf, size_t len); +}; + + + + + + +struct xps_map { + unsigned int len; + unsigned int alloc_len; + struct callback_head rcu; + u16 queues[]; +}; + + + + + + + +struct xps_dev_maps { + struct callback_head rcu; + struct xps_map *attr_map[]; +}; +# 800 "./include/linux/netdevice.h" +struct netdev_tc_txq { + u16 count; + u16 offset; +}; + + + + + + +struct netdev_fcoe_hbainfo { + char manufacturer[64]; + char serial_number[64]; + char hardware_version[64]; + char driver_version[64]; + char optionrom_version[64]; + char firmware_version[64]; + char model[256]; + char model_description[256]; +}; + + + + + + + +struct netdev_phys_item_id { + unsigned char id[32]; + unsigned char id_len; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, + struct netdev_phys_item_id *b) +{ + return a->id_len == b->id_len && + memcmp(a->id, b->id, a->id_len) == 0; +} + +typedef u16 (*select_queue_fallback_t)(struct net_device *dev, + struct sk_buff *skb, + struct net_device *sb_dev); + +enum tc_setup_type { + TC_SETUP_QDISC_MQPRIO, + TC_SETUP_CLSU32, + TC_SETUP_CLSFLOWER, + TC_SETUP_CLSMATCHALL, + TC_SETUP_CLSBPF, + TC_SETUP_BLOCK, + TC_SETUP_QDISC_CBS, + TC_SETUP_QDISC_RED, + TC_SETUP_QDISC_PRIO, + TC_SETUP_QDISC_MQ, + TC_SETUP_QDISC_ETF, + TC_SETUP_ROOT_QDISC, + TC_SETUP_QDISC_GRED, + TC_SETUP_QDISC_TAPRIO, + TC_SETUP_FT, + TC_SETUP_QDISC_ETS, + TC_SETUP_QDISC_TBF, + TC_SETUP_QDISC_FIFO, +}; + + + + +enum bpf_netdev_command { + + + + + + + + XDP_SETUP_PROG, + XDP_SETUP_PROG_HW, + XDP_QUERY_PROG, + XDP_QUERY_PROG_HW, + + BPF_OFFLOAD_MAP_ALLOC, + BPF_OFFLOAD_MAP_FREE, + XDP_SETUP_XSK_UMEM, +}; + +struct bpf_prog_offload_ops; +struct netlink_ext_ack; +struct xdp_umem; +struct xdp_dev_bulk_queue; + +struct netdev_bpf { + enum bpf_netdev_command command; + union { + + struct { + u32 flags; + struct bpf_prog *prog; + struct netlink_ext_ack *extack; + }; + + struct { + u32 prog_id; + + u32 prog_flags; + }; + + struct { + struct bpf_offloaded_map *offmap; + }; + + struct { + struct xdp_umem *umem; + u16 queue_id; + } xsk; + }; +}; + + + + + + +struct xfrmdev_ops { + int (*xdo_dev_state_add) (struct xfrm_state *x); + void (*xdo_dev_state_delete) (struct xfrm_state *x); + void (*xdo_dev_state_free) (struct xfrm_state *x); + bool (*xdo_dev_offload_ok) (struct sk_buff *skb, + struct xfrm_state *x); + void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); +}; + + +struct dev_ifalias { + struct callback_head rcuhead; + char ifalias[]; +}; + +struct devlink; +struct tlsdev_ops; + +struct netdev_name_node { + struct hlist_node hlist; + struct list_head list; + struct net_device *dev; + const char *name; +}; + +int netdev_name_node_alt_create(struct net_device *dev, const char *name); +int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); + +struct netdev_net_notifier { + struct list_head list; + struct notifier_block *nb; +}; +# 1282 "./include/linux/netdevice.h" +struct net_device_ops { + int (*ndo_init)(struct 
net_device *dev); + void (*ndo_uninit)(struct net_device *dev); + int (*ndo_open)(struct net_device *dev); + int (*ndo_stop)(struct net_device *dev); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + struct net_device *dev); + netdev_features_t (*ndo_features_check)(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); + u16 (*ndo_select_queue)(struct net_device *dev, + struct sk_buff *skb, + struct net_device *sb_dev); + void (*ndo_change_rx_flags)(struct net_device *dev, + int flags); + void (*ndo_set_rx_mode)(struct net_device *dev); + int (*ndo_set_mac_address)(struct net_device *dev, + void *addr); + int (*ndo_validate_addr)(struct net_device *dev); + int (*ndo_do_ioctl)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_set_config)(struct net_device *dev, + struct ifmap *map); + int (*ndo_change_mtu)(struct net_device *dev, + int new_mtu); + int (*ndo_neigh_setup)(struct net_device *dev, + struct neigh_parms *); + void (*ndo_tx_timeout) (struct net_device *dev, + unsigned int txqueue); + + void (*ndo_get_stats64)(struct net_device *dev, + struct rtnl_link_stats64 *storage); + bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); + int (*ndo_get_offload_stats)(int attr_id, + const struct net_device *dev, + void *attr_data); + struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); + + int (*ndo_vlan_rx_add_vid)(struct net_device *dev, + __be16 proto, u16 vid); + int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, + __be16 proto, u16 vid); + + void (*ndo_poll_controller)(struct net_device *dev); + int (*ndo_netpoll_setup)(struct net_device *dev, + struct netpoll_info *info); + void (*ndo_netpoll_cleanup)(struct net_device *dev); + + int (*ndo_set_vf_mac)(struct net_device *dev, + int queue, u8 *mac); + int (*ndo_set_vf_vlan)(struct net_device *dev, + int queue, u16 vlan, + u8 qos, __be16 proto); + int (*ndo_set_vf_rate)(struct net_device *dev, + int vf, int min_tx_rate, + int max_tx_rate); + int (*ndo_set_vf_spoofchk)(struct net_device *dev, + int vf, bool setting); + int (*ndo_set_vf_trust)(struct net_device *dev, + int vf, bool setting); + int (*ndo_get_vf_config)(struct net_device *dev, + int vf, + struct ifla_vf_info *ivf); + int (*ndo_set_vf_link_state)(struct net_device *dev, + int vf, int link_state); + int (*ndo_get_vf_stats)(struct net_device *dev, + int vf, + struct ifla_vf_stats + *vf_stats); + int (*ndo_set_vf_port)(struct net_device *dev, + int vf, + struct nlattr *port[]); + int (*ndo_get_vf_port)(struct net_device *dev, + int vf, struct sk_buff *skb); + int (*ndo_get_vf_guid)(struct net_device *dev, + int vf, + struct ifla_vf_guid *node_guid, + struct ifla_vf_guid *port_guid); + int (*ndo_set_vf_guid)(struct net_device *dev, + int vf, u64 guid, + int guid_type); + int (*ndo_set_vf_rss_query_en)( + struct net_device *dev, + int vf, bool setting); + int (*ndo_setup_tc)(struct net_device *dev, + enum tc_setup_type type, + void *type_data); + + int (*ndo_fcoe_enable)(struct net_device *dev); + int (*ndo_fcoe_disable)(struct net_device *dev); + int (*ndo_fcoe_ddp_setup)(struct net_device *dev, + u16 xid, + struct scatterlist *sgl, + unsigned int sgc); + int (*ndo_fcoe_ddp_done)(struct net_device *dev, + u16 xid); + int (*ndo_fcoe_ddp_target)(struct net_device *dev, + u16 xid, + struct scatterlist *sgl, + unsigned int sgc); + int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, + struct netdev_fcoe_hbainfo *hbainfo); + + + + + + int (*ndo_fcoe_get_wwn)(struct net_device *dev, + u64 *wwn, int type); + + 
+ + int (*ndo_rx_flow_steer)(struct net_device *dev, + const struct sk_buff *skb, + u16 rxq_index, + u32 flow_id); + + int (*ndo_add_slave)(struct net_device *dev, + struct net_device *slave_dev, + struct netlink_ext_ack *extack); + int (*ndo_del_slave)(struct net_device *dev, + struct net_device *slave_dev); + struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, + struct sk_buff *skb, + bool all_slaves); + netdev_features_t (*ndo_fix_features)(struct net_device *dev, + netdev_features_t features); + int (*ndo_set_features)(struct net_device *dev, + netdev_features_t features); + int (*ndo_neigh_construct)(struct net_device *dev, + struct neighbour *n); + void (*ndo_neigh_destroy)(struct net_device *dev, + struct neighbour *n); + + int (*ndo_fdb_add)(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, + u16 flags, + struct netlink_ext_ack *extack); + int (*ndo_fdb_del)(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid); + int (*ndo_fdb_dump)(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, + int *idx); + int (*ndo_fdb_get)(struct sk_buff *skb, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, u32 portid, u32 seq, + struct netlink_ext_ack *extack); + int (*ndo_bridge_setlink)(struct net_device *dev, + struct nlmsghdr *nlh, + u16 flags, + struct netlink_ext_ack *extack); + int (*ndo_bridge_getlink)(struct sk_buff *skb, + u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask, + int nlflags); + int (*ndo_bridge_dellink)(struct net_device *dev, + struct nlmsghdr *nlh, + u16 flags); + int (*ndo_change_carrier)(struct net_device *dev, + bool new_carrier); + int (*ndo_get_phys_port_id)(struct net_device *dev, + struct netdev_phys_item_id *ppid); + int (*ndo_get_port_parent_id)(struct net_device *dev, + struct netdev_phys_item_id *ppid); + int (*ndo_get_phys_port_name)(struct net_device *dev, + char *name, size_t len); + void (*ndo_udp_tunnel_add)(struct net_device *dev, + struct udp_tunnel_info *ti); + void (*ndo_udp_tunnel_del)(struct net_device *dev, + struct udp_tunnel_info *ti); + void* (*ndo_dfwd_add_station)(struct net_device *pdev, + struct net_device *dev); + void (*ndo_dfwd_del_station)(struct net_device *pdev, + void *priv); + + int (*ndo_set_tx_maxrate)(struct net_device *dev, + int queue_index, + u32 maxrate); + int (*ndo_get_iflink)(const struct net_device *dev); + int (*ndo_change_proto_down)(struct net_device *dev, + bool proto_down); + int (*ndo_fill_metadata_dst)(struct net_device *dev, + struct sk_buff *skb); + void (*ndo_set_rx_headroom)(struct net_device *dev, + int needed_headroom); + int (*ndo_bpf)(struct net_device *dev, + struct netdev_bpf *bpf); + int (*ndo_xdp_xmit)(struct net_device *dev, int n, + struct xdp_frame **xdp, + u32 flags); + int (*ndo_xsk_wakeup)(struct net_device *dev, + u32 queue_id, u32 flags); + struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); + int (*ndo_tunnel_ctl)(struct net_device *dev, + struct ip_tunnel_parm *p, int cmd); +}; +# 1536 "./include/linux/netdevice.h" +enum netdev_priv_flags { + IFF_802_1Q_VLAN = 1<<0, + IFF_EBRIDGE = 1<<1, + IFF_BONDING = 1<<2, + IFF_ISATAP = 1<<3, + IFF_WAN_HDLC = 1<<4, + IFF_XMIT_DST_RELEASE = 1<<5, + IFF_DONT_BRIDGE = 1<<6, + IFF_DISABLE_NETPOLL = 1<<7, + IFF_MACVLAN_PORT = 1<<8, + IFF_BRIDGE_PORT = 1<<9, + IFF_OVS_DATAPATH = 1<<10, + IFF_TX_SKB_SHARING = 1<<11, + 
IFF_UNICAST_FLT = 1<<12, + IFF_TEAM_PORT = 1<<13, + IFF_SUPP_NOFCS = 1<<14, + IFF_LIVE_ADDR_CHANGE = 1<<15, + IFF_MACVLAN = 1<<16, + IFF_XMIT_DST_RELEASE_PERM = 1<<17, + IFF_L3MDEV_MASTER = 1<<18, + IFF_NO_QUEUE = 1<<19, + IFF_OPENVSWITCH = 1<<20, + IFF_L3MDEV_SLAVE = 1<<21, + IFF_TEAM = 1<<22, + IFF_RXFH_CONFIGURED = 1<<23, + IFF_PHONY_HEADROOM = 1<<24, + IFF_MACSEC = 1<<25, + IFF_NO_RX_HANDLER = 1<<26, + IFF_FAILOVER = 1<<27, + IFF_FAILOVER_SLAVE = 1<<28, + IFF_L3MDEV_RX_HANDLER = 1<<29, + IFF_LIVE_RENAME_OK = 1<<30, +}; +# 1843 "./include/linux/netdevice.h" +struct net_device { + char name[16]; + struct netdev_name_node *name_node; + struct dev_ifalias *ifalias; + + + + + unsigned long mem_end; + unsigned long mem_start; + unsigned long base_addr; + int irq; + + + + + + + + unsigned long state; + + struct list_head dev_list; + struct list_head napi_list; + struct list_head unreg_list; + struct list_head close_list; + struct list_head ptype_all; + struct list_head ptype_specific; + + struct { + struct list_head upper; + struct list_head lower; + } adj_list; + + netdev_features_t features; + netdev_features_t hw_features; + netdev_features_t wanted_features; + netdev_features_t vlan_features; + netdev_features_t hw_enc_features; + netdev_features_t mpls_features; + netdev_features_t gso_partial_features; + + int ifindex; + int group; + + struct net_device_stats stats; + + atomic_long_t rx_dropped; + atomic_long_t tx_dropped; + atomic_long_t rx_nohandler; + + + atomic_t carrier_up_count; + atomic_t carrier_down_count; + + + const struct iw_handler_def *wireless_handlers; + struct iw_public_data *wireless_data; + + const struct net_device_ops *netdev_ops; + const struct ethtool_ops *ethtool_ops; + + const struct l3mdev_ops *l3mdev_ops; + + + const struct ndisc_ops *ndisc_ops; + + + + const struct xfrmdev_ops *xfrmdev_ops; + + + + const struct tlsdev_ops *tlsdev_ops; + + + const struct header_ops *header_ops; + + unsigned int flags; + unsigned int priv_flags; + + unsigned short gflags; + unsigned short padded; + + unsigned char operstate; + unsigned char link_mode; + + unsigned char if_port; + unsigned char dma; + + + + + + + unsigned int mtu; + unsigned int min_mtu; + unsigned int max_mtu; + unsigned short type; + unsigned short hard_header_len; + unsigned char min_header_len; + + unsigned short needed_headroom; + unsigned short needed_tailroom; + + + unsigned char perm_addr[32]; + unsigned char addr_assign_type; + unsigned char addr_len; + unsigned char upper_level; + unsigned char lower_level; + unsigned short neigh_priv_len; + unsigned short dev_id; + unsigned short dev_port; + spinlock_t addr_list_lock; + unsigned char name_assign_type; + bool uc_promisc; + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + + + struct kset *queues_kset; + + unsigned int promiscuity; + unsigned int allmulti; + + + + + + struct vlan_info *vlan_info; + + + struct dsa_port *dsa_ptr; + + + struct tipc_bearer *tipc_ptr; + + + void *atalk_ptr; + + struct in_device *ip_ptr; + + struct dn_dev *dn_ptr; + + struct inet6_dev *ip6_ptr; + + void *ax25_ptr; + + struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; + + struct mpls_dev *mpls_ptr; + + + + + + + unsigned char *dev_addr; + + struct netdev_rx_queue *_rx; + unsigned int num_rx_queues; + unsigned int real_num_rx_queues; + + struct bpf_prog *xdp_prog; + unsigned long gro_flush_timeout; + int napi_defer_hard_irqs; + rx_handler_func_t *rx_handler; + void *rx_handler_data; + + + struct 
mini_Qdisc *miniq_ingress; + + struct netdev_queue *ingress_queue; + + struct nf_hook_entries *nf_hooks_ingress; + + + unsigned char broadcast[32]; + + struct cpu_rmap *rx_cpu_rmap; + + struct hlist_node index_hlist; + + + + + struct netdev_queue *_tx __attribute__((__aligned__((1 << (6))))); + unsigned int num_tx_queues; + unsigned int real_num_tx_queues; + struct Qdisc *qdisc; + unsigned int tx_queue_len; + spinlock_t tx_global_lock; + + struct xdp_dev_bulk_queue *xdp_bulkq; + + + struct xps_dev_maps *xps_cpus_map; + struct xps_dev_maps *xps_rxqs_map; + + + struct mini_Qdisc *miniq_egress; + + + + struct hlist_head qdisc_hash[1 << (4)]; + + + struct timer_list watchdog_timer; + int watchdog_timeo; + + struct list_head todo_list; + int *pcpu_refcnt; + + struct list_head link_watch_list; + + enum { NETREG_UNINITIALIZED=0, + NETREG_REGISTERED, + NETREG_UNREGISTERING, + NETREG_UNREGISTERED, + NETREG_RELEASED, + NETREG_DUMMY, + } reg_state:8; + + bool dismantle; + + enum { + RTNL_LINK_INITIALIZED, + RTNL_LINK_INITIALIZING, + } rtnl_link_state:16; + + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *dev); + + + struct netpoll_info *npinfo; + + + possible_net_t nd_net; + + + union { + void *ml_priv; + struct pcpu_lstats *lstats; + struct pcpu_sw_netstats *tstats; + struct pcpu_dstats *dstats; + }; + + + struct garp_port *garp_port; + + + struct mrp_port *mrp_port; + + + struct device dev; + const struct attribute_group *sysfs_groups[4]; + const struct attribute_group *sysfs_rx_queue_group; + + const struct rtnl_link_ops *rtnl_link_ops; + + + + unsigned int gso_max_size; + + u16 gso_max_segs; + + + const struct dcbnl_rtnl_ops *dcbnl_ops; + + s16 num_tc; + struct netdev_tc_txq tc_to_txq[16]; + u8 prio_tc_map[15 + 1]; + + + unsigned int fcoe_ddp_xid; + + + struct netprio_map *priomap; + + struct phy_device *phydev; + struct sfp_bus *sfp_bus; + struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; + bool proto_down; + unsigned wol_enabled:1; + + struct list_head net_notifier_list; + + + + const struct macsec_ops *macsec_ops; + +}; + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_elide_gro(const struct net_device *dev) +{ + if (!(dev->features & ((netdev_features_t)1 << (NETIF_F_GRO_BIT))) || dev->xdp_prog) + return true; + return false; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) +{ + return dev->prio_tc_map[prio & 15]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) +{ + if (tc >= dev->num_tc) + return -22; + + dev->prio_tc_map[prio & 15] = tc & 15; + return 0; +} + +int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); +void netdev_reset_tc(struct net_device *dev); +int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); +int netdev_set_num_tc(struct net_device *dev, u8 num_tc); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +int netdev_get_num_tc(struct net_device *dev) +{ + return dev->num_tc; +} + +void netdev_unbind_sb_channel(struct net_device *dev, + struct net_device *sb_dev); +int netdev_bind_sb_channel_queue(struct net_device *dev, + struct net_device *sb_dev, + u8 tc, u16 
count, u16 offset); +int netdev_set_sb_channel(struct net_device *dev, u16 channel); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int netdev_get_sb_channel(struct net_device *dev) +{ + return __builtin_choose_expr(((!!(sizeof((typeof((int)(-dev->num_tc)) *)1 == (typeof((int)(0)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(-dev->num_tc)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(0)) * 0l)) : (int *)8))))), (((int)(-dev->num_tc)) > ((int)(0)) ? ((int)(-dev->num_tc)) : ((int)(0))), ({ typeof((int)(-dev->num_tc)) __UNIQUE_ID___x1226 = ((int)(-dev->num_tc)); typeof((int)(0)) __UNIQUE_ID___y1227 = ((int)(0)); ((__UNIQUE_ID___x1226) > (__UNIQUE_ID___y1227) ? (__UNIQUE_ID___x1226) : (__UNIQUE_ID___y1227)); })); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, + unsigned int index) +{ + return &dev->_tx[index]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, + const struct sk_buff *skb) +{ + return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void netdev_for_each_tx_queue(struct net_device *dev, + void (*f)(struct net_device *, + struct netdev_queue *, + void *), + void *arg) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) + f(dev, &dev->_tx[i], arg); +} +# 2229 "./include/linux/netdevice.h" +u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); +struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, + struct sk_buff *skb, + struct net_device *sb_dev); + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned netdev_get_fwd_headroom(struct net_device *dev) +{ + return dev->priv_flags & IFF_PHONY_HEADROOM ? 
0 : dev->needed_headroom; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void netdev_set_rx_headroom(struct net_device *dev, int new_hr) +{ + if (dev->netdev_ops->ndo_set_rx_headroom) + dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void netdev_reset_rx_headroom(struct net_device *dev) +{ + netdev_set_rx_headroom(dev, -1); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct net *dev_net(const struct net_device *dev) +{ + return read_pnet(&dev->nd_net); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void dev_net_set(struct net_device *dev, struct net *net) +{ + write_pnet(&dev->nd_net, net); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *netdev_priv(const struct net_device *dev) +{ + return (char *)dev + ((((sizeof(struct net_device))) + ((typeof((sizeof(struct net_device))))((32)) - 1)) & ~((typeof((sizeof(struct net_device))))((32)) - 1)); +} +# 2307 "./include/linux/netdevice.h" +void netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight); +# 2321 "./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void netif_tx_napi_add(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) +{ + set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); + netif_napi_add(dev, napi, poll, weight); +} + + + + + + + +void netif_napi_del(struct napi_struct *napi); + +struct napi_gro_cb { + + void *frag0; + + + unsigned int frag0_len; + + + int data_offset; + + + u16 flush; + + + u16 flush_id; + + + u16 count; + + + u16 gro_remcsum_start; + + + unsigned long age; + + + u16 proto; + + + u8 same_flow:1; + + + u8 encap_mark:1; + + + u8 csum_valid:1; + + + u8 csum_cnt:3; + + + u8 free:2; + + + + + u8 is_ipv6:1; + + + u8 is_fou:1; + + + u8 is_atomic:1; + + + u8 recursion_counter:4; + + + u8 is_flist:1; + + + __wsum csum; + + + struct sk_buff *last; +}; + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int gro_recursion_inc_test(struct sk_buff *skb) +{ + return ++((struct napi_gro_cb *)(skb)->cb)->recursion_counter == 15; +} + +typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *call_gro_receive(gro_receive_t cb, + struct list_head *head, + struct sk_buff *skb) +{ + if (__builtin_expect(!!(gro_recursion_inc_test(skb)), 0)) { + ((struct napi_gro_cb *)(skb)->cb)->flush |= 1; + return ((void *)0); + } + + return cb(head, skb); +} + +typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, + struct sk_buff *); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, + struct sock *sk, + struct list_head *head, + struct sk_buff *skb) +{ + if (__builtin_expect(!!(gro_recursion_inc_test(skb)), 0)) { + ((struct napi_gro_cb 
*)(skb)->cb)->flush |= 1; + return ((void *)0); + } + + return cb(sk, head, skb); +} + +struct packet_type { + __be16 type; + bool ignore_outgoing; + struct net_device *dev; + int (*func) (struct sk_buff *, + struct net_device *, + struct packet_type *, + struct net_device *); + void (*list_func) (struct list_head *, + struct packet_type *, + struct net_device *); + bool (*id_match)(struct packet_type *ptype, + struct sock *sk); + void *af_packet_priv; + struct list_head list; +}; + +struct offload_callbacks { + struct sk_buff *(*gso_segment)(struct sk_buff *skb, + netdev_features_t features); + struct sk_buff *(*gro_receive)(struct list_head *head, + struct sk_buff *skb); + int (*gro_complete)(struct sk_buff *skb, int nhoff); +}; + +struct packet_offload { + __be16 type; + u16 priority; + struct offload_callbacks callbacks; + struct list_head list; +}; + + +struct pcpu_sw_netstats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; +} __attribute__((__aligned__(4 * sizeof(u64)))); + +struct pcpu_lstats { + u64_stats_t packets; + u64_stats_t bytes; + struct u64_stats_sync syncp; +} __attribute__((__aligned__(2 * sizeof(u64)))); + +void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_lstats_add(struct net_device *dev, unsigned int len) +{ + struct pcpu_lstats *lstats = ({ do { const void *__vpp_verify = (typeof((dev->lstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (dev->lstats)); (typeof(*(dev->lstats)) *)tcp_ptr__; }); }); + + u64_stats_update_begin(&lstats->syncp); + u64_stats_add(&lstats->bytes, len); + u64_stats_inc(&lstats->packets); + u64_stats_update_end(&lstats->syncp); +} +# 2517 "./include/linux/netdevice.h" +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN, + NETDEV_LAG_TX_TYPE_RANDOM, + NETDEV_LAG_TX_TYPE_BROADCAST, + NETDEV_LAG_TX_TYPE_ROUNDROBIN, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, + NETDEV_LAG_TX_TYPE_HASH, +}; + +enum netdev_lag_hash { + NETDEV_LAG_HASH_NONE, + NETDEV_LAG_HASH_L2, + NETDEV_LAG_HASH_L34, + NETDEV_LAG_HASH_L23, + NETDEV_LAG_HASH_E23, + NETDEV_LAG_HASH_E34, + NETDEV_LAG_HASH_UNKNOWN, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; +}; + +struct netdev_lag_lower_state_info { + u8 link_up : 1, + tx_enabled : 1; +}; + + + + + + + +enum netdev_cmd { + NETDEV_UP = 1, + NETDEV_DOWN, + NETDEV_REBOOT, + + + + NETDEV_CHANGE, + NETDEV_REGISTER, + NETDEV_UNREGISTER, + NETDEV_CHANGEMTU, + NETDEV_CHANGEADDR, + NETDEV_PRE_CHANGEADDR, + NETDEV_GOING_DOWN, + NETDEV_CHANGENAME, + NETDEV_FEAT_CHANGE, + NETDEV_BONDING_FAILOVER, + NETDEV_PRE_UP, + NETDEV_PRE_TYPE_CHANGE, + NETDEV_POST_TYPE_CHANGE, + NETDEV_POST_INIT, + NETDEV_RELEASE, + NETDEV_NOTIFY_PEERS, + NETDEV_JOIN, + NETDEV_CHANGEUPPER, + NETDEV_RESEND_IGMP, + NETDEV_PRECHANGEMTU, + NETDEV_CHANGEINFODATA, + NETDEV_BONDING_INFO, + NETDEV_PRECHANGEUPPER, + NETDEV_CHANGELOWERSTATE, + NETDEV_UDP_TUNNEL_PUSH_INFO, + NETDEV_UDP_TUNNEL_DROP_INFO, + NETDEV_CHANGE_TX_QUEUE_LEN, + NETDEV_CVLAN_FILTER_PUSH_INFO, + NETDEV_CVLAN_FILTER_DROP_INFO, + NETDEV_SVLAN_FILTER_PUSH_INFO, + NETDEV_SVLAN_FILTER_DROP_INFO, +}; +const char *netdev_cmd_to_name(enum netdev_cmd cmd); + +int register_netdevice_notifier(struct notifier_block *nb); +int 
unregister_netdevice_notifier(struct notifier_block *nb); +int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); +int unregister_netdevice_notifier_net(struct net *net, + struct notifier_block *nb); +int register_netdevice_notifier_dev_net(struct net_device *dev, + struct notifier_block *nb, + struct netdev_net_notifier *nn); +int unregister_netdevice_notifier_dev_net(struct net_device *dev, + struct notifier_block *nb, + struct netdev_net_notifier *nn); + +struct netdev_notifier_info { + struct net_device *dev; + struct netlink_ext_ack *extack; +}; + +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; + union { + u32 mtu; + } ext; +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; + unsigned int flags_changed; +}; + +struct netdev_notifier_changeupper_info { + struct netdev_notifier_info info; + struct net_device *upper_dev; + bool master; + bool linking; + void *upper_info; +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; + void *lower_state_info; +}; + +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; + const unsigned char *dev_addr; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void netdev_notifier_info_init(struct netdev_notifier_info *info, + struct net_device *dev) +{ + info->dev = dev; + info->extack = ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net_device * +netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) +{ + return info->dev; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct netlink_ext_ack * +netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) +{ + return info->extack; +} + +int call_netdevice_notifiers(unsigned long val, struct net_device *dev); + + +extern rwlock_t dev_base_lock; +# 2684 "./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net_device *next_net_device(struct net_device *dev) +{ + struct list_head *lh; + struct net *net; + + net = dev_net(dev); + lh = dev->dev_list.next; + return lh == &net->dev_base_head ? 
((void *)0) : ({ void *__mptr = (void *)(lh); do { extern void __compiletime_assert_1228(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(lh)), typeof(((struct net_device *)0)->dev_list)) && !__builtin_types_compatible_p(typeof(*(lh)), typeof(void))))) __compiletime_assert_1228(); } while (0); ((struct net_device *)(__mptr - __builtin_offsetof(struct net_device, dev_list))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net_device *next_net_device_rcu(struct net_device *dev) +{ + struct list_head *lh; + struct net *net; + + net = dev_net(dev); + lh = ({ typeof(*((*((struct list_head **)(&(&dev->dev_list)->next))))) *________p1 = (typeof(*((*((struct list_head **)(&(&dev->dev_list)->next))))) *)({ do { extern void __compiletime_assert_1229(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(char) || sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(short) || sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(int) || sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(long)) || sizeof(((*((struct list_head **)(&(&dev->dev_list)->next))))) == sizeof(long long))) __compiletime_assert_1229(); } while (0); ({ typeof( _Generic((((*((struct list_head **)(&(&dev->dev_list)->next))))), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((*((struct list_head **)(&(&dev->dev_list)->next))))))) __x = (*(const volatile typeof( _Generic((((*((struct list_head **)(&(&dev->dev_list)->next))))), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((*((struct list_head **)(&(&dev->dev_list)->next))))))) *)&(((*((struct list_head **)(&(&dev->dev_list)->next)))))); do { } while (0); (typeof(((*((struct list_head **)(&(&dev->dev_list)->next))))))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/netdevice.h", 2700, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*((*((struct list_head **)(&(&dev->dev_list)->next))))) *)(________p1)); }); + return lh == &net->dev_base_head ? 
((void *)0) : ({ void *__mptr = (void *)(lh); do { extern void __compiletime_assert_1230(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(lh)), typeof(((struct net_device *)0)->dev_list)) && !__builtin_types_compatible_p(typeof(*(lh)), typeof(void))))) __compiletime_assert_1230(); } while (0); ((struct net_device *)(__mptr - __builtin_offsetof(struct net_device, dev_list))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net_device *first_net_device(struct net *net) +{ + return list_empty(&net->dev_base_head) ? ((void *)0) : + ({ void *__mptr = (void *)(net->dev_base_head.next); do { extern void __compiletime_assert_1231(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(net->dev_base_head.next)), typeof(((struct net_device *)0)->dev_list)) && !__builtin_types_compatible_p(typeof(*(net->dev_base_head.next)), typeof(void))))) __compiletime_assert_1231(); } while (0); ((struct net_device *)(__mptr - __builtin_offsetof(struct net_device, dev_list))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net_device *first_net_device_rcu(struct net *net) +{ + struct list_head *lh = ({ typeof(*((*((struct list_head **)(&(&net->dev_base_head)->next))))) *________p1 = (typeof(*((*((struct list_head **)(&(&net->dev_base_head)->next))))) *)({ do { extern void __compiletime_assert_1232(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(char) || sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(short) || sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(int) || sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(long)) || sizeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))) == sizeof(long long))) __compiletime_assert_1232(); } while (0); ({ typeof( _Generic((((*((struct list_head **)(&(&net->dev_base_head)->next))))), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((*((struct list_head **)(&(&net->dev_base_head)->next))))))) __x = (*(const volatile typeof( _Generic((((*((struct list_head **)(&(&net->dev_base_head)->next))))), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (((*((struct list_head **)(&(&net->dev_base_head)->next))))))) *)&(((*((struct list_head **)(&(&net->dev_base_head)->next)))))); do { } while (0); (typeof(((*((struct list_head **)(&(&net->dev_base_head)->next))))))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; 
lockdep_rcu_suspicious("include/linux/netdevice.h", 2712, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*((*((struct list_head **)(&(&net->dev_base_head)->next))))) *)(________p1)); }); + + return lh == &net->dev_base_head ? ((void *)0) : ({ void *__mptr = (void *)(lh); do { extern void __compiletime_assert_1233(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(lh)), typeof(((struct net_device *)0)->dev_list)) && !__builtin_types_compatible_p(typeof(*(lh)), typeof(void))))) __compiletime_assert_1233(); } while (0); ((struct net_device *)(__mptr - __builtin_offsetof(struct net_device, dev_list))); }); +} + +int netdev_boot_setup_check(struct net_device *dev); +unsigned long netdev_boot_base(const char *prefix, int unit); +struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, + const char *hwaddr); +struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); +struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); +void dev_add_pack(struct packet_type *pt); +void dev_remove_pack(struct packet_type *pt); +void __dev_remove_pack(struct packet_type *pt); +void dev_add_offload(struct packet_offload *po); +void dev_remove_offload(struct packet_offload *po); + +int dev_get_iflink(const struct net_device *dev); +int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); +struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, + unsigned short mask); +struct net_device *dev_get_by_name(struct net *net, const char *name); +struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); +struct net_device *__dev_get_by_name(struct net *net, const char *name); +int dev_alloc_name(struct net_device *dev, const char *name); +int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); +void dev_close(struct net_device *dev); +void dev_close_many(struct list_head *head, bool unlink); +void dev_disable_lro(struct net_device *dev); +int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); +u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); +u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); +int dev_queue_xmit(struct sk_buff *skb); +int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); +int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); +int register_netdevice(struct net_device *dev); +void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); +void unregister_netdevice_many(struct list_head *head); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void unregister_netdevice(struct net_device *dev) +{ + unregister_netdevice_queue(dev, ((void *)0)); +} + +int netdev_refcnt_read(const struct net_device *dev); +void free_netdev(struct net_device *dev); +void netdev_freemem(struct net_device *dev); +void synchronize_net(void); +int init_dummy_netdev(struct net_device *dev); + +struct net_device *netdev_get_xmit_slave(struct net_device *dev, + struct sk_buff *skb, + bool all_slaves); +struct net_device *dev_get_by_index(struct net *net, int ifindex); +struct net_device *__dev_get_by_index(struct net *net, int ifindex); +struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); +struct net_device *dev_get_by_napi_id(unsigned int napi_id); +int 
netdev_get_name(struct net *net, char *name, int ifindex); +int dev_restart(struct net_device *dev); +int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); +int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_gro_offset(const struct sk_buff *skb) +{ + return ((struct napi_gro_cb *)(skb)->cb)->data_offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int skb_gro_len(const struct sk_buff *skb) +{ + return skb->len - ((struct napi_gro_cb *)(skb)->cb)->data_offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gro_pull(struct sk_buff *skb, unsigned int len) +{ + ((struct napi_gro_cb *)(skb)->cb)->data_offset += len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_gro_header_fast(struct sk_buff *skb, + unsigned int offset) +{ + return ((struct napi_gro_cb *)(skb)->cb)->frag0 + offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) +{ + return ((struct napi_gro_cb *)(skb)->cb)->frag0_len < hlen; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gro_frag0_invalidate(struct sk_buff *skb) +{ + ((struct napi_gro_cb *)(skb)->cb)->frag0 = ((void *)0); + ((struct napi_gro_cb *)(skb)->cb)->frag0_len = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, + unsigned int offset) +{ + if (!pskb_may_pull(skb, hlen)) + return ((void *)0); + + skb_gro_frag0_invalidate(skb); + return skb->data + offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_gro_network_header(struct sk_buff *skb) +{ + return (((struct napi_gro_cb *)(skb)->cb)->frag0 ?: skb->data) + + skb_network_offset(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gro_postpull_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + if (((struct napi_gro_cb *)(skb)->cb)->csum_valid) + ((struct napi_gro_cb *)(skb)->cb)->csum = csum_sub(((struct napi_gro_cb *)(skb)->cb)->csum, + csum_partial(start, len, 0)); +} + + + + + + +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_at_gro_remcsum_start(struct sk_buff *skb) +{ + return (((struct napi_gro_cb *)(skb)->cb)->gro_remcsum_start == skb_gro_offset(skb)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, + bool zero_okay, + __sum16 check) +{ + return ((skb->ip_summed != 3 || + skb_checksum_start_offset(skb) < + skb_gro_offset(skb)) && + !skb_at_gro_remcsum_start(skb) && + ((struct napi_gro_cb *)(skb)->cb)->csum_cnt == 0 && + (!zero_okay || check)); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, + __wsum psum) +{ + if (((struct napi_gro_cb *)(skb)->cb)->csum_valid && + !csum_fold(csum_add(psum, ((struct napi_gro_cb *)(skb)->cb)->csum))) + return 0; + + ((struct napi_gro_cb *)(skb)->cb)->csum = psum; + + return __skb_gro_checksum_complete(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) +{ + if (((struct napi_gro_cb *)(skb)->cb)->csum_cnt > 0) { + + ((struct napi_gro_cb *)(skb)->cb)->csum_cnt--; + } else { + + + + + __skb_incr_checksum_unnecessary(skb); + } +} +# 2903 "./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __skb_gro_checksum_convert_check(struct sk_buff *skb) +{ + return (((struct napi_gro_cb *)(skb)->cb)->csum_cnt == 0 && + !((struct napi_gro_cb *)(skb)->cb)->csum_valid); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_gro_checksum_convert(struct sk_buff *skb, + __wsum pseudo) +{ + ((struct napi_gro_cb *)(skb)->cb)->csum = ~pseudo; + ((struct napi_gro_cb *)(skb)->cb)->csum_valid = 1; +} +# 2923 "./include/linux/netdevice.h" +struct gro_remcsum { + int offset; + __wsum delta; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gro_remcsum_init(struct gro_remcsum *grc) +{ + grc->offset = 0; + grc->delta = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, + unsigned int off, size_t hdrlen, + int start, int offset, + struct gro_remcsum *grc, + bool nopartial) +{ + __wsum delta; + size_t plen = hdrlen + __builtin_choose_expr(((!!(sizeof((typeof((size_t)(offset + sizeof(u16))) *)1 == (typeof((size_t)(start)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((size_t)(offset + sizeof(u16))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((size_t)(start)) * 0l)) : (int *)8))))), (((size_t)(offset + sizeof(u16))) > ((size_t)(start)) ? ((size_t)(offset + sizeof(u16))) : ((size_t)(start))), ({ typeof((size_t)(offset + sizeof(u16))) __UNIQUE_ID___x1234 = ((size_t)(offset + sizeof(u16))); typeof((size_t)(start)) __UNIQUE_ID___y1235 = ((size_t)(start)); ((__UNIQUE_ID___x1234) > (__UNIQUE_ID___y1235) ? 
(__UNIQUE_ID___x1234) : (__UNIQUE_ID___y1235)); })); + + do { if (__builtin_expect(!!(!((struct napi_gro_cb *)(skb)->cb)->csum_valid), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1236)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/netdevice.h"), "i" (2943), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1237)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + if (!nopartial) { + ((struct napi_gro_cb *)(skb)->cb)->gro_remcsum_start = off + hdrlen + start; + return ptr; + } + + ptr = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, off + plen)) { + ptr = skb_gro_header_slow(skb, off + plen, off); + if (!ptr) + return ((void *)0); + } + + delta = remcsum_adjust(ptr + hdrlen, ((struct napi_gro_cb *)(skb)->cb)->csum, + start, offset); + + + ((struct napi_gro_cb *)(skb)->cb)->csum = csum_add(((struct napi_gro_cb *)(skb)->cb)->csum, delta); + + grc->offset = off + hdrlen + offset; + grc->delta = delta; + + return ptr; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gro_remcsum_cleanup(struct sk_buff *skb, + struct gro_remcsum *grc) +{ + void *ptr; + size_t plen = grc->offset + sizeof(u16); + + if (!grc->delta) + return; + + ptr = skb_gro_header_fast(skb, grc->offset); + if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { + ptr = skb_gro_header_slow(skb, plen, grc->offset); + if (!ptr) + return; + } + + remcsum_unadjust((__sum16 *)ptr, grc->delta); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) +{ + if (PTR_ERR(pp) != -115) + ((struct napi_gro_cb *)(skb)->cb)->flush |= flush; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gro_flush_final_remcsum(struct sk_buff *skb, + struct sk_buff *pp, + int flush, + struct gro_remcsum *grc) +{ + if (PTR_ERR(pp) != -115) { + ((struct napi_gro_cb *)(skb)->cb)->flush |= flush; + skb_gro_remcsum_cleanup(skb, grc); + skb->remcsum_offload = 0; + } +} +# 3021 "./include/linux/netdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dev_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, + unsigned int len) +{ + if (!dev->header_ops || !dev->header_ops->create) + return 0; + + return dev->header_ops->create(skb, dev, type, daddr, saddr, len); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dev_parse_header(const struct sk_buff *skb, + unsigned char *haddr) +{ + const struct net_device *dev = skb->dev; + + if (!dev->header_ops || !dev->header_ops->parse) + return 0; + return dev->header_ops->parse(skb, haddr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) __be16 dev_parse_header_protocol(const struct sk_buff *skb) +{ + const struct net_device *dev = skb->dev; + + if (!dev->header_ops || !dev->header_ops->parse_protocol) + return 0; + return dev->header_ops->parse_protocol(skb); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dev_validate_header(const struct net_device *dev, + char *ll_header, int len) +{ + if (__builtin_expect(!!(len >= dev->hard_header_len), 1)) + return true; + if (len < dev->min_header_len) + return false; + + if (capable(17)) { + memset(ll_header + len, 0, dev->hard_header_len - len); + return true; + } + + if (dev->header_ops && dev->header_ops->validate) + return dev->header_ops->validate(ll_header, len); + + return false; +} + +typedef int gifconf_func_t(struct net_device * dev, char * bufptr, + int len, int size); +int register_gifconf(unsigned int family, gifconf_func_t *gifconf); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_gifconf(unsigned int family) +{ + return register_gifconf(family, ((void *)0)); +} + + + +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[(1 << 7)]; + u8 buckets[]; +}; + +extern int netdev_flow_limit_table_len; + + + + + +struct softnet_data { + struct list_head poll_list; + struct sk_buff_head process_queue; + + + unsigned int processed; + unsigned int time_squeeze; + unsigned int received_rps; + + struct softnet_data *rps_ipi_list; + + + struct sd_flow_limit *flow_limit; + + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; + + struct sk_buff_head xfrm_backlog; + + + struct { + u16 recursion; + u8 more; + } xmit; + + + + + unsigned int input_queue_head __attribute__((__aligned__((1 << (6))))); + + + call_single_data_t csd __attribute__((__aligned__((1 << (6))))); + struct softnet_data *rps_ipi_next; + unsigned int cpu; + unsigned int input_queue_tail; + + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void input_queue_head_incr(struct softnet_data *sd) +{ + + sd->input_queue_head++; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void input_queue_tail_incr_save(struct softnet_data *sd, + unsigned int *qtail) +{ + + *qtail = ++sd->input_queue_tail; + +} + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_softnet_data; extern __attribute__((section(".data..percpu" "..shared_aligned"))) __typeof__(struct softnet_data) softnet_data __attribute__((__aligned__((1 << (6))))); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dev_recursion_level(void) +{ + return ({ typeof(softnet_data.xmit.recursion) pscr_ret__; do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.recursion)) { case 1: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) pfo_ret__; switch (sizeof(softnet_data.xmit.recursion)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" 
"1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) pfo_ret__; switch (sizeof(softnet_data.xmit.recursion)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) pfo_ret__; switch (sizeof(softnet_data.xmit.recursion)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) pfo_ret__; switch (sizeof(softnet_data.xmit.recursion)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dev_xmit_recursion(void) +{ + return __builtin_expect(!!(({ __this_cpu_preempt_check("read"); ({ typeof(softnet_data.xmit.recursion) pscr_ret__; do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.recursion)) { case 1: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) pfo_ret__; switch (sizeof(softnet_data.xmit.recursion)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) pfo_ret__; switch 
(sizeof(softnet_data.xmit.recursion)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) pfo_ret__; switch (sizeof(softnet_data.xmit.recursion)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(softnet_data.xmit.recursion) pfo_ret__; switch (sizeof(softnet_data.xmit.recursion)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.recursion)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); }) > 10), 0) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_xmit_recursion_inc(void) +{ + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.recursion)) { case 1: do { typedef typeof((softnet_data.xmit.recursion)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((softnet_data.xmit.recursion))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((softnet_data.xmit.recursion)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((softnet_data.xmit.recursion))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((softnet_data.xmit.recursion)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((softnet_data.xmit.recursion))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((softnet_data.xmit.recursion)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((softnet_data.xmit.recursion))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dev_xmit_recursion_dec(void) +{ + ({ __this_cpu_preempt_check("add"); do { do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.recursion)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.recursion)) { case 1: do { typedef typeof((softnet_data.xmit.recursion)) pao_T__; const int pao_ID__ = 
(__builtin_constant_p(-(typeof(softnet_data.xmit.recursion))(1)) && ((-(typeof(softnet_data.xmit.recursion))(1)) == 1 || (-(typeof(softnet_data.xmit.recursion))(1)) == -1)) ? (int)(-(typeof(softnet_data.xmit.recursion))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(softnet_data.xmit.recursion))(1)); (void)pao_tmp__; } switch (sizeof((softnet_data.xmit.recursion))) { case 1: if (pao_ID__ == 1) asm ("incb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decb ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "qi" ((pao_T__)(-(typeof(softnet_data.xmit.recursion))(1)))); break; case 2: if (pao_ID__ == 1) asm ("incw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decw ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(-(typeof(softnet_data.xmit.recursion))(1)))); break; case 4: if (pao_ID__ == 1) asm ("incl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decl ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "ri" ((pao_T__)(-(typeof(softnet_data.xmit.recursion))(1)))); break; case 8: if (pao_ID__ == 1) asm ("incq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else if (pao_ID__ == -1) asm ("decq ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion))); else asm ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.recursion)) : "re" ((pao_T__)(-(typeof(softnet_data.xmit.recursion))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((softnet_data.xmit.recursion)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(softnet_data.xmit.recursion))(1)) && ((-(typeof(softnet_data.xmit.recursion))(1)) == 1 || (-(typeof(softnet_data.xmit.recursion))(1)) == -1)) ? 
+/* ... preprocessed ./include/linux/netdevice.h (continued): the repeated x86 per-CPU
+ * add/inc/dec asm expansions over softnet_data.xmit.recursion and over *dev->pcpu_refcnt
+ * (dev_hold/dev_put); the netif_tx_{start,stop,wake}_queue and queue-stopped inline
+ * helpers; BQL accounting (netdev_tx_sent_queue, netdev_tx_completed_queue,
+ * netdev_tx_reset_queue); netdev_cap_txqueue and the per-subqueue start/stop/wake
+ * helpers; netif_carrier, netif_dormant and netif_testing link-state helpers; the
+ * __netif_tx_lock family and netif_addr_lock helpers; alloc_netdev_mqs and
+ * register_netdev declarations; and the dev_uc_* and dev_mc_* address-list
+ * sync/unsync prototypes ... */
((pto_T__)(more))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "ri" ((pto_T__)(more))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "ri" ((pto_T__)(more))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "re" ((pto_T__)(more))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((softnet_data.xmit.more)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (more); (void)pto_tmp__; } switch (sizeof((softnet_data.xmit.more))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "qi" ((pto_T__)(more))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "ri" ((pto_T__)(more))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "ri" ((pto_T__)(more))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "re" ((pto_T__)(more))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((softnet_data.xmit.more)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (more); (void)pto_tmp__; } switch (sizeof((softnet_data.xmit.more))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "qi" ((pto_T__)(more))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "ri" ((pto_T__)(more))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "ri" ((pto_T__)(more))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "re" ((pto_T__)(more))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((softnet_data.xmit.more)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (more); (void)pto_tmp__; } switch (sizeof((softnet_data.xmit.more))) { case 1: asm ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "qi" ((pto_T__)(more))); break; case 2: asm ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "ri" ((pto_T__)(more))); break; case 4: asm ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "ri" ((pto_T__)(more))); break; case 8: asm ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((softnet_data.xmit.more)) : "re" ((pto_T__)(more))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); }); + return ops->ndo_start_xmit(skb, dev); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netdev_xmit_more(void) +{ + return ({ __this_cpu_preempt_check("read"); ({ typeof(softnet_data.xmit.more) pscr_ret__; do { const void *__vpp_verify = (typeof((&(softnet_data.xmit.more)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(softnet_data.xmit.more)) { case 1: pscr_ret__ = ({ typeof(softnet_data.xmit.more) pfo_ret__; switch (sizeof(softnet_data.xmit.more)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; default: __bad_percpu_size(); } pfo_ret__; }); 
break; case 2: pscr_ret__ = ({ typeof(softnet_data.xmit.more) pfo_ret__; switch (sizeof(softnet_data.xmit.more)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(softnet_data.xmit.more) pfo_ret__; switch (sizeof(softnet_data.xmit.more)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(softnet_data.xmit.more) pfo_ret__; switch (sizeof(softnet_data.xmit.more)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (softnet_data.xmit.more)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, bool more) +{ + const struct net_device_ops *ops = dev->netdev_ops; + netdev_tx_t rc; + + rc = __netdev_start_xmit(ops, skb, dev, more); + if (rc == NETDEV_TX_OK) + txq_trans_update(txq); + + return rc; +} + +int netdev_class_create_file_ns(const struct class_attribute *class_attr, + const void *ns); +void netdev_class_remove_file_ns(const struct class_attribute *class_attr, + const void *ns); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int netdev_class_create_file(const struct class_attribute *class_attr) +{ + return netdev_class_create_file_ns(class_attr, ((void *)0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void netdev_class_remove_file(const struct class_attribute *class_attr) +{ + netdev_class_remove_file_ns(class_attr, ((void *)0)); +} + +extern const struct kobj_ns_type_operations net_ns_type_operations; + +const char *netdev_drivername(const struct net_device *dev); + +void linkwatch_run_queue(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) netdev_features_t netdev_intersect_features(netdev_features_t f1, + netdev_features_t f2) +{ + if ((f1 ^ f2) & ((netdev_features_t)1 << (NETIF_F_HW_CSUM_BIT))) { + if (f1 & ((netdev_features_t)1 << (NETIF_F_HW_CSUM_BIT))) + f1 |= (((netdev_features_t)1 << 
(NETIF_F_IP_CSUM_BIT))|((netdev_features_t)1 << (NETIF_F_IPV6_CSUM_BIT))); + else + f2 |= (((netdev_features_t)1 << (NETIF_F_IP_CSUM_BIT))|((netdev_features_t)1 << (NETIF_F_IPV6_CSUM_BIT))); + } + + return f1 & f2; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) netdev_features_t netdev_get_wanted_features( + struct net_device *dev) +{ + return (dev->features & ~dev->hw_features) | dev->wanted_features; +} +netdev_features_t netdev_increment_features(netdev_features_t all, + netdev_features_t one, netdev_features_t mask); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) netdev_features_t netdev_add_tso_features(netdev_features_t features, + netdev_features_t mask) +{ + return netdev_increment_features(features, (((netdev_features_t)1 << (NETIF_F_TSO_BIT)) | ((netdev_features_t)1 << (NETIF_F_TSO6_BIT)) | ((netdev_features_t)1 << (NETIF_F_TSO_ECN_BIT)) | ((netdev_features_t)1 << (NETIF_F_TSO_MANGLEID_BIT))), mask); +} + +int __netdev_update_features(struct net_device *dev); +void netdev_update_features(struct net_device *dev); +void netdev_change_features(struct net_device *dev); + +void netif_stacked_transfer_operstate(const struct net_device *rootdev, + struct net_device *dev); + +netdev_features_t passthru_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); +netdev_features_t netif_skb_features(struct sk_buff *skb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool net_gso_ok(netdev_features_t features, int gso_type) +{ + netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; + + + do { extern void __compiletime_assert_1240(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_TCPV4 != (((netdev_features_t)1 << (NETIF_F_TSO_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1240(); } while (0); + do { extern void __compiletime_assert_1241(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_DODGY != (((netdev_features_t)1 << (NETIF_F_GSO_ROBUST_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1241(); } while (0); + do { extern void __compiletime_assert_1242(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_TCP_ECN != (((netdev_features_t)1 << (NETIF_F_TSO_ECN_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1242(); } while (0); + do { extern void __compiletime_assert_1243(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_TCP_FIXEDID != (((netdev_features_t)1 << (NETIF_F_TSO_MANGLEID_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1243(); } while (0); + do { extern void __compiletime_assert_1244(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_TCPV6 != (((netdev_features_t)1 << (NETIF_F_TSO6_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1244(); } while (0); + do { extern void __compiletime_assert_1245(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_FCOE != (((netdev_features_t)1 << (NETIF_F_FSO_BIT)) >> 
NETIF_F_GSO_SHIFT)))) __compiletime_assert_1245(); } while (0); + do { extern void __compiletime_assert_1246(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_GRE != (((netdev_features_t)1 << (NETIF_F_GSO_GRE_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1246(); } while (0); + do { extern void __compiletime_assert_1247(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_GRE_CSUM != (((netdev_features_t)1 << (NETIF_F_GSO_GRE_CSUM_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1247(); } while (0); + do { extern void __compiletime_assert_1248(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_IPXIP4 != (((netdev_features_t)1 << (NETIF_F_GSO_IPXIP4_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1248(); } while (0); + do { extern void __compiletime_assert_1249(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_IPXIP6 != (((netdev_features_t)1 << (NETIF_F_GSO_IPXIP6_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1249(); } while (0); + do { extern void __compiletime_assert_1250(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_UDP_TUNNEL != (((netdev_features_t)1 << (NETIF_F_GSO_UDP_TUNNEL_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1250(); } while (0); + do { extern void __compiletime_assert_1251(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_UDP_TUNNEL_CSUM != (((netdev_features_t)1 << (NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1251(); } while (0); + do { extern void __compiletime_assert_1252(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_PARTIAL != (((netdev_features_t)1 << (NETIF_F_GSO_PARTIAL_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1252(); } while (0); + do { extern void __compiletime_assert_1253(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_TUNNEL_REMCSUM != (((netdev_features_t)1 << (NETIF_F_GSO_TUNNEL_REMCSUM_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1253(); } while (0); + do { extern void __compiletime_assert_1254(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_SCTP != (((netdev_features_t)1 << (NETIF_F_GSO_SCTP_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1254(); } while (0); + do { extern void __compiletime_assert_1255(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_ESP != (((netdev_features_t)1 << (NETIF_F_GSO_ESP_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1255(); } while (0); + do { extern void __compiletime_assert_1256(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_UDP != (((netdev_features_t)1 << (NETIF_F_GSO_UDP_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1256(); } while (0); + do { extern 
void __compiletime_assert_1257(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_UDP_L4 != (((netdev_features_t)1 << (NETIF_F_GSO_UDP_L4_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1257(); } while (0); + do { extern void __compiletime_assert_1258(void) __attribute__((__error__("BUILD_BUG_ON failed: " "SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)"))); if (!(!(SKB_GSO_FRAGLIST != (((netdev_features_t)1 << (NETIF_F_GSO_FRAGLIST_BIT)) >> NETIF_F_GSO_SHIFT)))) __compiletime_assert_1258(); } while (0); + + return (features & feature) == feature; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) +{ + return net_gso_ok(features, ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type) && + (!skb_has_frag_list(skb) || (features & ((netdev_features_t)1 << (NETIF_F_FRAGLIST_BIT)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_needs_gso(struct sk_buff *skb, + netdev_features_t features) +{ + return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || + __builtin_expect(!!((skb->ip_summed != 3) && (skb->ip_summed != 1)), 0) + ); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void netif_set_gso_max_size(struct net_device *dev, + unsigned int size) +{ + dev->gso_max_size = size; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, + int pulled_hlen, u16 mac_offset, + int mac_len) +{ + skb->protocol = protocol; + skb->encapsulation = 1; + skb_push(skb, pulled_hlen); + skb_reset_transport_header(skb); + skb->mac_header = mac_offset; + skb->network_header = skb->mac_header + mac_len; + skb->mac_len = mac_len; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_macsec(const struct net_device *dev) +{ + return dev->priv_flags & IFF_MACSEC; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_macvlan(const struct net_device *dev) +{ + return dev->priv_flags & IFF_MACVLAN; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_macvlan_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_MACVLAN_PORT; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_bond_master(const struct net_device *dev) +{ + return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_bond_slave(const struct net_device *dev) +{ + return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_supports_nofcs(struct net_device *dev) +{ + return dev->priv_flags & IFF_SUPP_NOFCS; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool 
netif_has_l3_rx_handler(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_l3_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_MASTER; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_l3_slave(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_SLAVE; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_bridge_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_EBRIDGE; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_bridge_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_BRIDGE_PORT; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_ovs_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_OPENVSWITCH; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_ovs_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_OVS_DATAPATH; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_team_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_TEAM; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_team_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_TEAM_PORT; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_lag_master(const struct net_device *dev) +{ + return netif_is_bond_master(dev) || netif_is_team_master(dev); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_lag_port(const struct net_device *dev) +{ + return netif_is_bond_slave(dev) || netif_is_team_port(dev); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_rxfh_configured(const struct net_device *dev) +{ + return dev->priv_flags & IFF_RXFH_CONFIGURED; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_failover(const struct net_device *dev) +{ + return dev->priv_flags & IFF_FAILOVER; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_is_failover_slave(const struct net_device *dev) +{ + return dev->priv_flags & IFF_FAILOVER_SLAVE; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void netif_keep_dst(struct net_device *dev) +{ + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_reduces_vlan_mtu(struct net_device *dev) +{ + + return dev->priv_flags & IFF_MACSEC; +} + +extern struct pernet_operations loopback_net_ops; + + + 
+ + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *netdev_name(const struct net_device *dev) +{ + if (!dev->name[0] || strchr(dev->name, '%')) + return "(unnamed net_device)"; + return dev->name; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netdev_unregistering(const struct net_device *dev) +{ + return dev->reg_state == NETREG_UNREGISTERING; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char *netdev_reg_state(const struct net_device *dev) +{ + switch (dev->reg_state) { + case NETREG_UNINITIALIZED: return " (uninitialized)"; + case NETREG_REGISTERED: return ""; + case NETREG_UNREGISTERING: return " (unregistering)"; + case NETREG_UNREGISTERED: return " (unregistered)"; + case NETREG_RELEASED: return " (released)"; + case NETREG_DUMMY: return " (dummy)"; + } + + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(1); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1259)); }); __warn_printk("%s: unknown reg_state %d\n", dev->name, dev->reg_state); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1260)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/netdevice.h"), "i" (4899), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1261)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1262)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1263)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + return " (unknown)"; +} + +__attribute__((__format__(printf, 3, 4))) __attribute__((__cold__)) +void netdev_printk(const char *level, const struct net_device *dev, + const char *format, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void netdev_emerg(const struct net_device *dev, const char *format, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void netdev_alert(const struct net_device *dev, const char *format, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void netdev_crit(const struct net_device *dev, const char *format, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void netdev_err(const struct net_device *dev, const char *format, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void netdev_warn(const struct net_device *dev, const char *format, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void netdev_notice(const struct 
net_device *dev, const char *format, ...); +__attribute__((__format__(printf, 2, 3))) __attribute__((__cold__)) +void netdev_info(const struct net_device *dev, const char *format, ...); +# 5081 "./include/linux/netdevice.h" +extern struct net_device *blackhole_netdev; +# 47 "./include/net/sock.h" 2 + + + + + +# 1 "./include/linux/page_counter.h" 1 +# 9 "./include/linux/page_counter.h" +struct page_counter { + atomic_long_t usage; + unsigned long min; + unsigned long low; + unsigned long high; + unsigned long max; + struct page_counter *parent; + + + unsigned long emin; + atomic_long_t min_usage; + atomic_long_t children_min_usage; + + + unsigned long elow; + atomic_long_t low_usage; + atomic_long_t children_low_usage; + + + unsigned long watermark; + unsigned long failcnt; +}; + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_counter_init(struct page_counter *counter, + struct page_counter *parent) +{ + atomic_long_set(&counter->usage, 0); + counter->max = (((long)(~0UL >> 1)) / ((1UL) << 12)); + counter->parent = parent; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long page_counter_read(struct page_counter *counter) +{ + return atomic_long_read(&counter->usage); +} + +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); +void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); +bool page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); +void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); +void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_counter_set_high(struct page_counter *counter, + unsigned long nr_pages) +{ + do { do { extern void __compiletime_assert_1264(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(counter->high) == sizeof(char) || sizeof(counter->high) == sizeof(short) || sizeof(counter->high) == sizeof(int) || sizeof(counter->high) == sizeof(long)) || sizeof(counter->high) == sizeof(long long))) __compiletime_assert_1264(); } while (0); do { *(volatile typeof(counter->high) *)&(counter->high) = (nr_pages); } while (0); } while (0); +} + +int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); +int page_counter_memparse(const char *buf, const char *max, + unsigned long *nr_pages); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void page_counter_reset_watermark(struct page_counter *counter) +{ + counter->watermark = page_counter_read(counter); +} +# 53 "./include/net/sock.h" 2 +# 1 "./include/linux/memcontrol.h" 1 +# 18 "./include/linux/memcontrol.h" +# 1 "./include/linux/vmpressure.h" 1 +# 11 "./include/linux/vmpressure.h" +# 1 "./include/linux/eventfd.h" 1 +# 32 "./include/linux/eventfd.h" +struct eventfd_ctx; +struct file; + + + +void eventfd_ctx_put(struct eventfd_ctx *ctx); +struct file *eventfd_fget(int fd); +struct eventfd_ctx *eventfd_ctx_fdget(int fd); +struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); +__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); +int 
eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, + __u64 *cnt); + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_eventfd_wake_count; extern __attribute__((section(".data..percpu" ""))) __typeof__(int) eventfd_wake_count; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool eventfd_signal_count(void) +{ + return ({ typeof(eventfd_wake_count) pscr_ret__; do { const void *__vpp_verify = (typeof((&(eventfd_wake_count)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(eventfd_wake_count)) { case 1: pscr_ret__ = ({ typeof(eventfd_wake_count) pfo_ret__; switch (sizeof(eventfd_wake_count)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(eventfd_wake_count) pfo_ret__; switch (sizeof(eventfd_wake_count)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(eventfd_wake_count) pfo_ret__; switch (sizeof(eventfd_wake_count)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(eventfd_wake_count) pfo_ret__; switch (sizeof(eventfd_wake_count)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (eventfd_wake_count)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); +} +# 12 "./include/linux/vmpressure.h" 2 + +struct vmpressure { + unsigned long scanned; + unsigned long reclaimed; + + unsigned long tree_scanned; + unsigned long tree_reclaimed; + + spinlock_t sr_lock; + + + struct list_head events; + + struct mutex events_lock; + + struct work_struct work; +}; + +struct mem_cgroup; + + +extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, + unsigned long scanned, unsigned long reclaimed); 
+extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); + +extern void vmpressure_init(struct vmpressure *vmpr); +extern void vmpressure_cleanup(struct vmpressure *vmpr); +extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); +extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr); +extern int vmpressure_register_event(struct mem_cgroup *memcg, + struct eventfd_ctx *eventfd, + const char *args); +extern void vmpressure_unregister_event(struct mem_cgroup *memcg, + struct eventfd_ctx *eventfd); +# 19 "./include/linux/memcontrol.h" 2 + + + +# 1 "./include/linux/writeback.h" 1 +# 14 "./include/linux/writeback.h" +# 1 "./include/linux/blk-cgroup.h" 1 +# 36 "./include/linux/blk-cgroup.h" +enum blkg_iostat_type { + BLKG_IOSTAT_READ, + BLKG_IOSTAT_WRITE, + BLKG_IOSTAT_DISCARD, + + BLKG_IOSTAT_NR, +}; + +struct blkcg_gq; + +struct blkcg { + struct cgroup_subsys_state css; + spinlock_t lock; + refcount_t online_pin; + + struct xarray blkg_tree; + struct blkcg_gq *blkg_hint; + struct hlist_head blkg_list; + + struct blkcg_policy_data *cpd[5]; + + struct list_head all_blkcgs_node; + + struct list_head cgwb_list; + +}; + +struct blkg_iostat { + u64 bytes[BLKG_IOSTAT_NR]; + u64 ios[BLKG_IOSTAT_NR]; +}; + +struct blkg_iostat_set { + struct u64_stats_sync sync; + struct blkg_iostat cur; + struct blkg_iostat last; +}; +# 85 "./include/linux/blk-cgroup.h" +struct blkg_policy_data { + + struct blkcg_gq *blkg; + int plid; +}; +# 98 "./include/linux/blk-cgroup.h" +struct blkcg_policy_data { + + struct blkcg *blkcg; + int plid; +}; + + +struct blkcg_gq { + + struct request_queue *q; + struct list_head q_node; + struct hlist_node blkcg_node; + struct blkcg *blkcg; + + + + + + struct bdi_writeback_congested *wb_congested; + + + struct blkcg_gq *parent; + + + struct percpu_ref refcnt; + + + bool online; + + struct blkg_iostat_set *iostat_cpu; + struct blkg_iostat_set iostat; + + struct blkg_policy_data *pd[5]; + + spinlock_t async_bio_lock; + struct bio_list async_bios; + struct work_struct async_bio_work; + + atomic_t use_delay; + atomic64_t delay_nsec; + atomic64_t delay_start; + u64 last_delay; + int last_use; + + struct callback_head callback_head; +}; + +typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp); +typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd); +typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd); +typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd); +typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, + struct request_queue *q, struct blkcg *blkcg); +typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); +typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); +typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf, + size_t size); + +struct blkcg_policy { + int plid; + + struct cftype *dfl_cftypes; + struct cftype *legacy_cftypes; + + + blkcg_pol_alloc_cpd_fn *cpd_alloc_fn; + blkcg_pol_init_cpd_fn *cpd_init_fn; + blkcg_pol_free_cpd_fn *cpd_free_fn; + blkcg_pol_bind_cpd_fn *cpd_bind_fn; + + blkcg_pol_alloc_pd_fn *pd_alloc_fn; + blkcg_pol_init_pd_fn *pd_init_fn; + blkcg_pol_online_pd_fn *pd_online_fn; + blkcg_pol_offline_pd_fn *pd_offline_fn; + blkcg_pol_free_pd_fn *pd_free_fn; + 
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; + blkcg_pol_stat_pd_fn *pd_stat_fn; +}; + +extern struct blkcg blkcg_root; +extern struct cgroup_subsys_state * const blkcg_root_css; +extern bool blkcg_debug_stats; + +struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, + struct request_queue *q, bool update_hint); +struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); +struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); +int blkcg_init_queue(struct request_queue *q); +void blkcg_exit_queue(struct request_queue *q); + + +int blkcg_policy_register(struct blkcg_policy *pol); +void blkcg_policy_unregister(struct blkcg_policy *pol); +int blkcg_activate_policy(struct request_queue *q, + const struct blkcg_policy *pol); +void blkcg_deactivate_policy(struct request_queue *q, + const struct blkcg_policy *pol); + +const char *blkg_dev_name(struct blkcg_gq *blkg); +void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, + u64 (*prfill)(struct seq_file *, + struct blkg_policy_data *, int), + const struct blkcg_policy *pol, int data, + bool show_total); +u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); + +struct blkg_conf_ctx { + struct gendisk *disk; + struct blkcg_gq *blkg; + char *body; +}; + +struct gendisk *blkcg_conf_get_disk(char **inputp); +int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, + char *input, struct blkg_conf_ctx *ctx); +void blkg_conf_finish(struct blkg_conf_ctx *ctx); +# 227 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct cgroup_subsys_state *blkcg_css(void) +{ + struct cgroup_subsys_state *css; + + css = kthread_blkcg(); + if (css) + return css; + return task_css(get_current(), io_cgrp_id); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) +{ + return css ? 
({ void *__mptr = (void *)(css); do { extern void __compiletime_assert_1265(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(css)), typeof(((struct blkcg *)0)->css)) && !__builtin_types_compatible_p(typeof(*(css)), typeof(void))))) __compiletime_assert_1265(); } while (0); ((struct blkcg *)(__mptr - __builtin_offsetof(struct blkcg, css))); }) : ((void *)0); +} +# 256 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg *__bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return css_to_blkcg(blkcg_css()); +} +# 271 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blk_cgroup_congested(void) +{ + struct cgroup_subsys_state *css; + bool ret = false; + + rcu_read_lock(); + css = kthread_blkcg(); + if (!css) + css = task_css(get_current(), io_cgrp_id); + while (css) { + if (atomic_read(&css->cgroup->congestion_count)) { + ret = true; + break; + } + css = css->parent; + } + rcu_read_unlock(); + return ret; +} +# 309 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bio_issue_as_root_blkg(struct bio *bio) +{ + return (bio->bi_opf & ((1ULL << __REQ_META) | (1ULL << __REQ_SWAP))) != 0; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg *blkcg_parent(struct blkcg *blkcg) +{ + return css_to_blkcg(blkcg->css.parent); +} +# 336 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, + struct request_queue *q, + bool update_hint) +{ + struct blkcg_gq *blkg; + + if (blkcg == &blkcg_root) + return q->root_blkg; + + blkg = ({ typeof(*(blkcg->blkg_hint)) *________p1 = (typeof(*(blkcg->blkg_hint)) *)({ do { extern void __compiletime_assert_1266(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((blkcg->blkg_hint)) == sizeof(char) || sizeof((blkcg->blkg_hint)) == sizeof(short) || sizeof((blkcg->blkg_hint)) == sizeof(int) || sizeof((blkcg->blkg_hint)) == sizeof(long)) || sizeof((blkcg->blkg_hint)) == sizeof(long long))) __compiletime_assert_1266(); } while (0); ({ typeof( _Generic(((blkcg->blkg_hint)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((blkcg->blkg_hint)))) __x = (*(const volatile typeof( _Generic(((blkcg->blkg_hint)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: 
(signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((blkcg->blkg_hint)))) *)&((blkcg->blkg_hint))); do { } while (0); (typeof((blkcg->blkg_hint)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/blk-cgroup.h", 345, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(blkcg->blkg_hint)) *)(________p1)); }); + if (blkg && blkg->q == q) + return blkg; + + return blkg_lookup_slowpath(blkcg, q, update_hint); +} +# 360 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, + struct request_queue *q) +{ + ({ int __ret_warn_on = !!(!rcu_read_lock_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1267)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/blk-cgroup.h"), "i" (363), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1268)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1269)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + return __blkg_lookup(blkcg, q, false); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q) +{ + return q->root_blkg; +} +# 385 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, + struct blkcg_policy *pol) +{ + return blkg ? blkg->pd[pol->plid] : ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg, + struct blkcg_policy *pol) +{ + return blkcg ? blkcg->cpd[pol->plid] : ((void *)0); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) +{ + return pd ? pd->blkg : ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd) +{ + return cpd ? 
cpd->blkcg : ((void *)0); +} + +extern void blkcg_destroy_blkgs(struct blkcg *blkcg); +# 423 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blkcg_pin_online(struct blkcg *blkcg) +{ + refcount_inc(&blkcg->online_pin); +} +# 437 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blkcg_unpin_online(struct blkcg *blkcg) +{ + do { + if (!refcount_dec_and_test(&blkcg->online_pin)) + break; + blkcg_destroy_blkgs(blkcg); + blkcg = blkcg_parent(blkcg); + } while (blkcg); +} +# 455 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) +{ + return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blkg_get(struct blkcg_gq *blkg) +{ + percpu_ref_get(&blkg->refcnt); +} +# 478 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blkg_tryget(struct blkcg_gq *blkg) +{ + return blkg && percpu_ref_tryget(&blkg->refcnt); +} +# 492 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) +{ + struct blkcg_gq *ret_blkg = ((void *)0); + + ({ int __ret_warn_on = !!(!rcu_read_lock_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1270)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/blk-cgroup.h"), "i" (496), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1271)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1272)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + + while (blkg) { + if (blkg_tryget(blkg)) { + ret_blkg = blkg; + break; + } + blkg = blkg->parent; + } + + return ret_blkg; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blkg_put(struct blkcg_gq *blkg) +{ + percpu_ref_put(&blkg->refcnt); +} +# 551 "./include/linux/blk-cgroup.h" +extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, + struct bio *bio); + + + + + +bool __blkcg_punt_bio_submit(struct bio *bio); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blkcg_punt_bio_submit(struct bio *bio) +{ + if (bio->bi_opf & (1ULL << __REQ_CGROUP_PUNT)) + return __blkcg_punt_bio_submit(bio); + else + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void blkcg_bio_issue_init(struct bio *bio) +{ + bio_issue_init(&bio->bi_issue, (((bio)->bi_iter).bi_size >> 9)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool blkcg_bio_issue_check(struct request_queue *q, + struct bio *bio) +{ + struct blkcg_gq *blkg; + bool throtl = false; + + rcu_read_lock(); + + if (!bio->bi_blkg) { + char b[32]; + + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(1); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1273)); }); __warn_printk("no blkg associated for bio on block-device: %s\n", bio_devname(bio, b)); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1274)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/blk-cgroup.h"), "i" (584), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1275)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1276)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1277)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }) + + ; + bio_associate_blkg(bio); + } + + blkg = bio->bi_blkg; + + throtl = blk_throtl_bio(q, blkg, bio); + + if (!throtl) { + struct blkg_iostat_set *bis; + int rwd, cpu; + + if (op_is_discard(bio->bi_opf)) + rwd = BLKG_IOSTAT_DISCARD; + else if (op_is_write(bio->bi_opf)) + rwd = BLKG_IOSTAT_WRITE; + else + rwd = BLKG_IOSTAT_READ; + + cpu = ({ do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); ({ __this_cpu_preempt_check("read"); ({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm ("mov" "q 
""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); }); }); + bis = ({ do { const void *__vpp_verify = (typeof((blkg->iostat_cpu) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((blkg->iostat_cpu))) *)((blkg->iostat_cpu)))); (typeof((typeof(*((blkg->iostat_cpu))) *)((blkg->iostat_cpu)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }); + u64_stats_update_begin(&bis->sync); + + + + + + + if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { + bio_set_flag(bio, BIO_CGROUP_ACCT); + bis->cur.bytes[rwd] += bio->bi_iter.bi_size; + } + bis->cur.ios[rwd]++; + + u64_stats_update_end(&bis->sync); + if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&io_cgrp_subsys_on_dfl_key), struct static_key_true)) branch = !arch_static_branch(&(&io_cgrp_subsys_on_dfl_key)->key, true); else if (__builtin_types_compatible_p(typeof(*&io_cgrp_subsys_on_dfl_key), struct static_key_false)) branch = !arch_static_branch_jump(&(&io_cgrp_subsys_on_dfl_key)->key, true); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 1); })) + cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu); + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); + } + + blkcg_bio_issue_init(bio); + + rcu_read_unlock(); + return !throtl; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blkcg_use_delay(struct blkcg_gq *blkg) +{ + if (({ int __ret_warn_on = !!(atomic_read(&blkg->use_delay) < 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1278)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/blk-cgroup.h"), "i" (634), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1279)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" 
".popsection\n\t" : : "i" (1280)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return; + if (atomic_add_return(1, &blkg->use_delay) == 1) + atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int blkcg_unuse_delay(struct blkcg_gq *blkg) +{ + int old = atomic_read(&blkg->use_delay); + + if (({ int __ret_warn_on = !!(old < 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1281)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/blk-cgroup.h"), "i" (644), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1282)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1283)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return 0; + if (old == 0) + return 0; +# 656 "./include/linux/blk-cgroup.h" + while (old) { + int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1); + if (cur == old) + break; + old = cur; + } + + if (old == 0) + return 0; + if (old == 1) + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); + return 1; +} +# 679 "./include/linux/blk-cgroup.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) +{ + int old = atomic_read(&blkg->use_delay); + + + if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old) + atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); + + atomic64_set(&blkg->delay_nsec, delay); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void blkcg_clear_delay(struct blkcg_gq *blkg) +{ + int old = atomic_read(&blkg->use_delay); + + + if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old) + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); +} + +void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); +void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); +void blkcg_maybe_throttle_current(void); +# 15 "./include/linux/writeback.h" 2 + +struct bio; + +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_dirty_throttle_leaks; extern __attribute__((section(".data..percpu" ""))) __typeof__(int) dirty_throttle_leaks; +# 36 "./include/linux/writeback.h" +struct backing_dev_info; + + + + +enum writeback_sync_modes { + WB_SYNC_NONE, + WB_SYNC_ALL, +}; + + + + + + +struct writeback_control { + long nr_to_write; + + long pages_skipped; + + + + + + + loff_t range_start; + loff_t range_end; + + enum writeback_sync_modes sync_mode; + + unsigned for_kupdate:1; + unsigned for_background:1; + unsigned tagged_writepages:1; + unsigned for_reclaim:1; + unsigned range_cyclic:1; + unsigned for_sync:1; + + + + + + + + unsigned no_cgroup_owner:1; + + unsigned punt_to_cgroup:1; + + + struct bdi_writeback *wb; + struct inode *inode; + + + int wb_id; + int 
wb_lcand_id; + int wb_tcand_id; + size_t wb_bytes; + size_t wb_lcand_bytes; + size_t wb_tcand_bytes; + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int wbc_to_write_flags(struct writeback_control *wbc) +{ + int flags = 0; + + if (wbc->punt_to_cgroup) + flags = (1ULL << __REQ_CGROUP_PUNT); + + if (wbc->sync_mode == WB_SYNC_ALL) + flags |= (1ULL << __REQ_SYNC); + else if (wbc->for_kupdate || wbc->for_background) + flags |= (1ULL << __REQ_BACKGROUND); + + return flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct cgroup_subsys_state * +wbc_blkcg_css(struct writeback_control *wbc) +{ + + if (wbc->wb) + return wbc->wb->blkcg_css; + + return blkcg_root_css; +} +# 129 "./include/linux/writeback.h" +struct wb_domain { + spinlock_t lock; +# 149 "./include/linux/writeback.h" + struct fprop_global completions; + struct timer_list period_timer; + unsigned long period_time; +# 163 "./include/linux/writeback.h" + unsigned long dirty_limit_tstamp; + unsigned long dirty_limit; +}; +# 179 "./include/linux/writeback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wb_domain_size_changed(struct wb_domain *dom) +{ + spin_lock(&dom->lock); + dom->dirty_limit_tstamp = jiffies; + dom->dirty_limit = 0; + spin_unlock(&dom->lock); +} + + + + +struct bdi_writeback; +void writeback_inodes_sb(struct super_block *, enum wb_reason reason); +void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, + enum wb_reason reason); +void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason); +void sync_inodes_sb(struct super_block *); +void wakeup_flusher_threads(enum wb_reason reason); +void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi, + enum wb_reason reason); +void inode_wait_for_writeback(struct inode *inode); +void inode_io_list_del(struct inode *inode); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wait_on_inode(struct inode *inode) +{ + do { __might_sleep("include/linux/writeback.h", 205, 0); do { } while (0); } while (0); + wait_on_bit(&inode->i_state, 3, 0x0002); +} + + + + + + +void __inode_attach_wb(struct inode *inode, struct page *page); +void wbc_attach_and_unlock_inode(struct writeback_control *wbc, + struct inode *inode) + ; +void wbc_detach_inode(struct writeback_control *wbc); +void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page, + size_t bytes); +int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr_pages, + enum wb_reason reason, struct wb_completion *done); +void cgroup_writeback_umount(void); +# 234 "./include/linux/writeback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_attach_wb(struct inode *inode, struct page *page) +{ + if (!inode->i_wb) + __inode_attach_wb(inode, page); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inode_detach_wb(struct inode *inode) +{ + if (inode->i_wb) { + ({ int __ret_warn_on = !!(!(inode->i_state & (1 << 6))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1284)); }); do { asm volatile("1:\t" ".byte 0x0f, 
0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/writeback.h"), "i" (249), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1285)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1286)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + wb_put(inode->i_wb); + inode->i_wb = ((void *)0); + } +} +# 264 "./include/linux/writeback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, + struct inode *inode) +{ + spin_lock(&inode->i_lock); + inode_attach_wb(inode, ((void *)0)); + wbc_attach_and_unlock_inode(wbc, inode); +} +# 282 "./include/linux/writeback.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) +{ + + + + + + + if (wbc->wb) + bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css); +} +# 339 "./include/linux/writeback.h" +void laptop_io_completion(struct backing_dev_info *info); +void laptop_sync_completion(void); +void laptop_mode_sync(struct work_struct *work); +void laptop_mode_timer_fn(struct timer_list *t); + + + +bool node_dirty_ok(struct pglist_data *pgdat); +int wb_domain_init(struct wb_domain *dom, gfp_t gfp); + +void wb_domain_exit(struct wb_domain *dom); + + +extern struct wb_domain global_wb_domain; + + +extern int dirty_background_ratio; +extern unsigned long dirty_background_bytes; +extern int vm_dirty_ratio; +extern unsigned long vm_dirty_bytes; +extern unsigned int dirty_writeback_interval; +extern unsigned int dirty_expire_interval; +extern unsigned int dirtytime_expire_interval; +extern int vm_highmem_is_dirtyable; +extern int block_dump; +extern int laptop_mode; + +int dirty_background_ratio_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_background_bytes_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_ratio_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_bytes_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirtytime_interval_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); + +void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); +unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); + +void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time); +void balance_dirty_pages_ratelimited(struct address_space *mapping); +bool wb_over_bg_thresh(struct bdi_writeback *wb); + +typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, + void *data); + +int generic_writepages(struct address_space *mapping, + struct writeback_control *wbc); +void tag_pages_for_writeback(struct address_space *mapping, + unsigned long 
start, unsigned long end); +int write_cache_pages(struct address_space *mapping, + struct writeback_control *wbc, writepage_t writepage, + void *data); +int do_writepages(struct address_space *mapping, struct writeback_control *wbc); +void writeback_set_ratelimit(void); +void tag_pages_for_writeback(struct address_space *mapping, + unsigned long start, unsigned long end); + +void account_page_redirty(struct page *page); + +void sb_mark_inode_writeback(struct inode *inode); +void sb_clear_inode_writeback(struct inode *inode); +# 23 "./include/linux/memcontrol.h" 2 + + +struct mem_cgroup; +struct page; +struct mm_struct; +struct kmem_cache; + + +enum memcg_stat_item { + MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, + MEMCG_SOCK, + + MEMCG_KERNEL_STACK_KB, + MEMCG_NR_STAT, +}; + +enum memcg_memory_event { + MEMCG_LOW, + MEMCG_HIGH, + MEMCG_MAX, + MEMCG_OOM, + MEMCG_OOM_KILL, + MEMCG_SWAP_HIGH, + MEMCG_SWAP_MAX, + MEMCG_SWAP_FAIL, + MEMCG_NR_MEMORY_EVENTS, +}; + +enum mem_cgroup_protection { + MEMCG_PROT_NONE, + MEMCG_PROT_LOW, + MEMCG_PROT_MIN, +}; + +struct mem_cgroup_reclaim_cookie { + pg_data_t *pgdat; + unsigned int generation; +}; + + + + + + +struct mem_cgroup_id { + int id; + refcount_t ref; +}; + + + + + + + +enum mem_cgroup_events_target { + MEM_CGROUP_TARGET_THRESH, + MEM_CGROUP_TARGET_SOFTLIMIT, + MEM_CGROUP_NTARGETS, +}; + +struct memcg_vmstats_percpu { + long stat[MEMCG_NR_STAT]; + unsigned long events[NR_VM_EVENT_ITEMS]; + unsigned long nr_page_events; + unsigned long targets[MEM_CGROUP_NTARGETS]; +}; + +struct mem_cgroup_reclaim_iter { + struct mem_cgroup *position; + + unsigned int generation; +}; + +struct lruvec_stat { + long count[NR_VM_NODE_STAT_ITEMS]; +}; + + + + + +struct memcg_shrinker_map { + struct callback_head rcu; + unsigned long map[]; +}; + + + + +struct mem_cgroup_per_node { + struct lruvec lruvec; + + + struct lruvec_stat *lruvec_stat_local; + + + struct lruvec_stat *lruvec_stat_cpu; + atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; + + unsigned long lru_zone_size[5][NR_LRU_LISTS]; + + struct mem_cgroup_reclaim_iter iter; + + struct memcg_shrinker_map *shrinker_map; + + struct rb_node tree_node; + unsigned long usage_in_excess; + + bool on_tree; + struct mem_cgroup *memcg; + +}; + +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + unsigned long threshold; +}; + + +struct mem_cgroup_threshold_ary { + + int current_threshold; + + unsigned int size; + + struct mem_cgroup_threshold entries[]; +}; + +struct mem_cgroup_thresholds { + + struct mem_cgroup_threshold_ary *primary; + + + + + + struct mem_cgroup_threshold_ary *spare; +}; + +enum memcg_kmem_state { + KMEM_NONE, + KMEM_ALLOCATED, + KMEM_ONLINE, +}; + + +struct memcg_padding { + char x[0]; +} __attribute__((__aligned__(1 << (12)))); +# 188 "./include/linux/memcontrol.h" +struct memcg_cgwb_frn { + u64 bdi_id; + int memcg_id; + u64 at; + struct wb_completion done; +}; + + + + + + + +struct mem_cgroup { + struct cgroup_subsys_state css; + + + struct mem_cgroup_id id; + + + struct page_counter memory; + struct page_counter swap; + + + struct page_counter memsw; + struct page_counter kmem; + struct page_counter tcpmem; + + + struct work_struct high_work; + + unsigned long soft_limit; + + + struct vmpressure vmpressure; + + + + + bool use_hierarchy; + + + + + bool oom_group; + + + bool oom_lock; + int under_oom; + + int swappiness; + + int oom_kill_disable; + + + struct cgroup_file events_file; + struct cgroup_file events_local_file; + + + struct cgroup_file swap_events_file; + + + struct mutex 
thresholds_lock; + + + struct mem_cgroup_thresholds thresholds; + + + struct mem_cgroup_thresholds memsw_thresholds; + + + struct list_head oom_notify; + + + + + + unsigned long move_charge_at_immigrate; + + spinlock_t move_lock; + unsigned long move_lock_flags; + + struct memcg_padding _pad1_;; + + + + + atomic_t moving_account; + struct task_struct *move_lock_task; + + + struct memcg_vmstats_percpu *vmstats_local; + + + struct memcg_vmstats_percpu *vmstats_percpu; + + struct memcg_padding _pad2_;; + + atomic_long_t vmstats[MEMCG_NR_STAT]; + atomic_long_t vmevents[NR_VM_EVENT_ITEMS]; + + + atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; + atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS]; + + unsigned long socket_pressure; + + + bool tcpmem_active; + int tcpmem_pressure; + + + + int kmemcg_id; + enum memcg_kmem_state kmem_state; + struct list_head kmem_caches; + + + + struct list_head cgwb_list; + struct wb_domain cgwb_domain; + struct memcg_cgwb_frn cgwb_frn[4]; + + + + struct list_head event_list; + spinlock_t event_list_lock; + + + struct deferred_split deferred_split_queue; + + + struct mem_cgroup_per_node *nodeinfo[0]; + +}; + + + + + + + +extern struct mem_cgroup *root_mem_cgroup; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mem_cgroup_is_root(struct mem_cgroup *memcg) +{ + return (memcg == root_mem_cgroup); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mem_cgroup_disabled(void) +{ + return !({ bool branch; if (__builtin_types_compatible_p(typeof(*&memory_cgrp_subsys_enabled_key), struct static_key_true)) branch = !arch_static_branch(&(&memory_cgrp_subsys_enabled_key)->key, true); else if (__builtin_types_compatible_p(typeof(*&memory_cgrp_subsys_enabled_key), struct static_key_false)) branch = !arch_static_branch_jump(&(&memory_cgrp_subsys_enabled_key)->key, true); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 1); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, + bool in_low_reclaim) +{ + if (mem_cgroup_disabled()) + return 0; + + if (in_low_reclaim) + return ({ do { extern void __compiletime_assert_1287(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.emin) == sizeof(char) || sizeof(memcg->memory.emin) == sizeof(short) || sizeof(memcg->memory.emin) == sizeof(int) || sizeof(memcg->memory.emin) == sizeof(long)) || sizeof(memcg->memory.emin) == sizeof(long long))) __compiletime_assert_1287(); } while (0); ({ typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) __x = (*(const volatile typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: 
(signed long long)0, default: (memcg->memory.emin))) *)&(memcg->memory.emin)); do { } while (0); (typeof(memcg->memory.emin))__x; }); }); + + return __builtin_choose_expr(((!!(sizeof((typeof(({ do { extern void __compiletime_assert_1288(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.emin) == sizeof(char) || sizeof(memcg->memory.emin) == sizeof(short) || sizeof(memcg->memory.emin) == sizeof(int) || sizeof(memcg->memory.emin) == sizeof(long)) || sizeof(memcg->memory.emin) == sizeof(long long))) __compiletime_assert_1288(); } while (0); ({ typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) __x = (*(const volatile typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) *)&(memcg->memory.emin)); do { } while (0); (typeof(memcg->memory.emin))__x; }); })) *)1 == (typeof(({ do { extern void __compiletime_assert_1289(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.elow) == sizeof(char) || sizeof(memcg->memory.elow) == sizeof(short) || sizeof(memcg->memory.elow) == sizeof(int) || sizeof(memcg->memory.elow) == sizeof(long)) || sizeof(memcg->memory.elow) == sizeof(long long))) __compiletime_assert_1289(); } while (0); ({ typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) __x = (*(const volatile typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) *)&(memcg->memory.elow)); do { } while (0); (typeof(memcg->memory.elow))__x; }); })) *)1))) && ((sizeof(int) == sizeof(*(8 ? 
((void *)((long)(({ do { extern void __compiletime_assert_1288(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.emin) == sizeof(char) || sizeof(memcg->memory.emin) == sizeof(short) || sizeof(memcg->memory.emin) == sizeof(int) || sizeof(memcg->memory.emin) == sizeof(long)) || sizeof(memcg->memory.emin) == sizeof(long long))) __compiletime_assert_1288(); } while (0); ({ typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) __x = (*(const volatile typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) *)&(memcg->memory.emin)); do { } while (0); (typeof(memcg->memory.emin))__x; }); })) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(({ do { extern void __compiletime_assert_1289(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.elow) == sizeof(char) || sizeof(memcg->memory.elow) == sizeof(short) || sizeof(memcg->memory.elow) == sizeof(int) || sizeof(memcg->memory.elow) == sizeof(long)) || sizeof(memcg->memory.elow) == sizeof(long long))) __compiletime_assert_1289(); } while (0); ({ typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) __x = (*(const volatile typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) *)&(memcg->memory.elow)); do { } while (0); (typeof(memcg->memory.elow))__x; }); })) * 0l)) : (int *)8))))), ((({ do { extern void __compiletime_assert_1288(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.emin) == sizeof(char) || sizeof(memcg->memory.emin) == sizeof(short) || sizeof(memcg->memory.emin) == sizeof(int) || sizeof(memcg->memory.emin) == sizeof(long)) || sizeof(memcg->memory.emin) == sizeof(long long))) __compiletime_assert_1288(); } while (0); ({ typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, 
unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) __x = (*(const volatile typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) *)&(memcg->memory.emin)); do { } while (0); (typeof(memcg->memory.emin))__x; }); })) > (({ do { extern void __compiletime_assert_1289(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.elow) == sizeof(char) || sizeof(memcg->memory.elow) == sizeof(short) || sizeof(memcg->memory.elow) == sizeof(int) || sizeof(memcg->memory.elow) == sizeof(long)) || sizeof(memcg->memory.elow) == sizeof(long long))) __compiletime_assert_1289(); } while (0); ({ typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) __x = (*(const volatile typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) *)&(memcg->memory.elow)); do { } while (0); (typeof(memcg->memory.elow))__x; }); })) ? 
(({ do { extern void __compiletime_assert_1288(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.emin) == sizeof(char) || sizeof(memcg->memory.emin) == sizeof(short) || sizeof(memcg->memory.emin) == sizeof(int) || sizeof(memcg->memory.emin) == sizeof(long)) || sizeof(memcg->memory.emin) == sizeof(long long))) __compiletime_assert_1288(); } while (0); ({ typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) __x = (*(const volatile typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) *)&(memcg->memory.emin)); do { } while (0); (typeof(memcg->memory.emin))__x; }); })) : (({ do { extern void __compiletime_assert_1289(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.elow) == sizeof(char) || sizeof(memcg->memory.elow) == sizeof(short) || sizeof(memcg->memory.elow) == sizeof(int) || sizeof(memcg->memory.elow) == sizeof(long)) || sizeof(memcg->memory.elow) == sizeof(long long))) __compiletime_assert_1289(); } while (0); ({ typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) __x = (*(const volatile typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) *)&(memcg->memory.elow)); do { } while (0); (typeof(memcg->memory.elow))__x; }); }))), ({ typeof(({ do { extern void __compiletime_assert_1288(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.emin) == sizeof(char) || sizeof(memcg->memory.emin) == sizeof(short) || sizeof(memcg->memory.emin) == sizeof(int) || sizeof(memcg->memory.emin) == sizeof(long)) || sizeof(memcg->memory.emin) == sizeof(long long))) __compiletime_assert_1288(); } while (0); ({ typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: 
(memcg->memory.emin))) __x = (*(const volatile typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) *)&(memcg->memory.emin)); do { } while (0); (typeof(memcg->memory.emin))__x; }); })) __UNIQUE_ID___x1290 = (({ do { extern void __compiletime_assert_1288(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.emin) == sizeof(char) || sizeof(memcg->memory.emin) == sizeof(short) || sizeof(memcg->memory.emin) == sizeof(int) || sizeof(memcg->memory.emin) == sizeof(long)) || sizeof(memcg->memory.emin) == sizeof(long long))) __compiletime_assert_1288(); } while (0); ({ typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) __x = (*(const volatile typeof( _Generic((memcg->memory.emin), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.emin))) *)&(memcg->memory.emin)); do { } while (0); (typeof(memcg->memory.emin))__x; }); })); typeof(({ do { extern void __compiletime_assert_1289(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.elow) == sizeof(char) || sizeof(memcg->memory.elow) == sizeof(short) || sizeof(memcg->memory.elow) == sizeof(int) || sizeof(memcg->memory.elow) == sizeof(long)) || sizeof(memcg->memory.elow) == sizeof(long long))) __compiletime_assert_1289(); } while (0); ({ typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) __x = (*(const volatile typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) *)&(memcg->memory.elow)); do { } while (0); (typeof(memcg->memory.elow))__x; }); })) __UNIQUE_ID___y1291 = (({ do { extern void __compiletime_assert_1289(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(memcg->memory.elow) == sizeof(char) || sizeof(memcg->memory.elow) == sizeof(short) || 
sizeof(memcg->memory.elow) == sizeof(int) || sizeof(memcg->memory.elow) == sizeof(long)) || sizeof(memcg->memory.elow) == sizeof(long long))) __compiletime_assert_1289(); } while (0); ({ typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) __x = (*(const volatile typeof( _Generic((memcg->memory.elow), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (memcg->memory.elow))) *)&(memcg->memory.elow)); do { } while (0); (typeof(memcg->memory.elow))__x; }); })); ((__UNIQUE_ID___x1290) > (__UNIQUE_ID___y1291) ? (__UNIQUE_ID___x1290) : (__UNIQUE_ID___y1291)); })) + ; +} + +enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, + struct mem_cgroup *memcg); + +int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); + +void mem_cgroup_uncharge(struct page *page); +void mem_cgroup_uncharge_list(struct list_head *page_list); + +void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); + +static struct mem_cgroup_per_node * +mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid) +{ + return memcg->nodeinfo[nid]; +} +# 379 "./include/linux/memcontrol.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, + struct pglist_data *pgdat) +{ + struct mem_cgroup_per_node *mz; + struct lruvec *lruvec; + + if (mem_cgroup_disabled()) { + lruvec = &pgdat->__lruvec; + goto out; + } + + if (!memcg) + memcg = root_mem_cgroup; + + mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); + lruvec = &mz->lruvec; +out: + + + + + + if (__builtin_expect(!!(lruvec->pgdat != pgdat), 0)) + lruvec->pgdat = pgdat; + return lruvec; +} + +struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *); + +struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); + +struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); + +struct mem_cgroup *get_mem_cgroup_from_page(struct page *page); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ + return css ? 
({ void *__mptr = (void *)(css); do { extern void __compiletime_assert_1292(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(css)), typeof(((struct mem_cgroup *)0)->css)) && !__builtin_types_compatible_p(typeof(*(css)), typeof(void))))) __compiletime_assert_1292(); } while (0); ((struct mem_cgroup *)(__mptr - __builtin_offsetof(struct mem_cgroup, css))); }) : ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mem_cgroup_put(struct mem_cgroup *memcg) +{ + if (memcg) + css_put(&memcg->css); +} + + + + +struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, + struct mem_cgroup *, + struct mem_cgroup_reclaim_cookie *); +void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); +int mem_cgroup_scan_tasks(struct mem_cgroup *, + int (*)(struct task_struct *, void *), void *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned short mem_cgroup_id(struct mem_cgroup *memcg) +{ + if (mem_cgroup_disabled()) + return 0; + + return memcg->id.id; +} +struct mem_cgroup *mem_cgroup_from_id(unsigned short id); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) +{ + return mem_cgroup_from_css(seq_css(m)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) +{ + struct mem_cgroup_per_node *mz; + + if (mem_cgroup_disabled()) + return ((void *)0); + + mz = ({ void *__mptr = (void *)(lruvec); do { extern void __compiletime_assert_1293(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(lruvec)), typeof(((struct mem_cgroup_per_node *)0)->lruvec)) && !__builtin_types_compatible_p(typeof(*(lruvec)), typeof(void))))) __compiletime_assert_1293(); } while (0); ((struct mem_cgroup_per_node *)(__mptr - __builtin_offsetof(struct mem_cgroup_per_node, lruvec))); }); + return mz->memcg; +} +# 467 "./include/linux/memcontrol.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) +{ + if (!memcg->memory.parent) + return ((void *)0); + return ({ void *__mptr = (void *)(memcg->memory.parent); do { extern void __compiletime_assert_1294(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(memcg->memory.parent)), typeof(((struct mem_cgroup *)0)->memory)) && !__builtin_types_compatible_p(typeof(*(memcg->memory.parent)), typeof(void))))) __compiletime_assert_1294(); } while (0); ((struct mem_cgroup *)(__mptr - __builtin_offsetof(struct mem_cgroup, memory))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, + struct mem_cgroup *root) +{ + if (root == memcg) + return true; + if (!root->use_hierarchy) + return false; + return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mm_match_cgroup(struct mm_struct *mm, + struct 
mem_cgroup *memcg) +{ + struct mem_cgroup *task_memcg; + bool match = false; + + rcu_read_lock(); + task_memcg = mem_cgroup_from_task(({ typeof(*(mm->owner)) *________p1 = (typeof(*(mm->owner)) *)({ do { extern void __compiletime_assert_1295(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((mm->owner)) == sizeof(char) || sizeof((mm->owner)) == sizeof(short) || sizeof((mm->owner)) == sizeof(int) || sizeof((mm->owner)) == sizeof(long)) || sizeof((mm->owner)) == sizeof(long long))) __compiletime_assert_1295(); } while (0); ({ typeof( _Generic(((mm->owner)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((mm->owner)))) __x = (*(const volatile typeof( _Generic(((mm->owner)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((mm->owner)))) *)&((mm->owner))); do { } while (0); (typeof((mm->owner)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/memcontrol.h", 491, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(mm->owner)) *)(________p1)); })); + if (task_memcg) + match = mem_cgroup_is_descendant(task_memcg, memcg); + rcu_read_unlock(); + return match; +} + +struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page); +ino_t page_cgroup_ino(struct page *page); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mem_cgroup_online(struct mem_cgroup *memcg) +{ + if (mem_cgroup_disabled()) + return true; + return !!(memcg->css.flags & CSS_ONLINE); +} + + + + +int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); + +void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, + int zid, int nr_pages); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, + enum lru_list lru, int zone_idx) +{ + struct mem_cgroup_per_node *mz; + + mz = ({ void *__mptr = (void *)(lruvec); do { extern void __compiletime_assert_1296(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(lruvec)), typeof(((struct mem_cgroup_per_node *)0)->lruvec)) && !__builtin_types_compatible_p(typeof(*(lruvec)), typeof(void))))) __compiletime_assert_1296(); } while (0); ((struct mem_cgroup_per_node *)(__mptr - __builtin_offsetof(struct mem_cgroup_per_node, lruvec))); }); + return mz->lru_zone_size[zone_idx][lru]; +} + +void mem_cgroup_handle_over_high(void); + +unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); + +unsigned long mem_cgroup_size(struct mem_cgroup *memcg); + +void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, + struct task_struct *p); + +void 
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mem_cgroup_enter_user_fault(void) +{ + ({ int __ret_warn_on = !!(get_current()->in_user_fault); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1297)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/memcontrol.h"), "i" (539), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1298)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1299)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + get_current()->in_user_fault = 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mem_cgroup_exit_user_fault(void) +{ + ({ int __ret_warn_on = !!(!get_current()->in_user_fault); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1300)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/memcontrol.h"), "i" (545), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1301)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1302)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + get_current()->in_user_fault = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool task_in_memcg_oom(struct task_struct *p) +{ + return p->memcg_in_oom; +} + +bool mem_cgroup_oom_synchronize(bool wait); +struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, + struct mem_cgroup *oom_domain); +void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); + + +extern bool cgroup_memory_noswap; + + +struct mem_cgroup *lock_page_memcg(struct page *page); +void __unlock_page_memcg(struct mem_cgroup *memcg); +void unlock_page_memcg(struct page *page); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) +{ + long x = atomic_long_read(&memcg->vmstats[idx]); + + if (x < 0) + x = 0; + + return x; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long memcg_page_state_local(struct mem_cgroup *memcg, + int idx) +{ + long x = 0; + int cpu; + + for (((cpu)) = -1; ((cpu)) = 
cpumask_next(((cpu)), (((const struct cpumask *)&__cpu_possible_mask))), ((cpu)) < nr_cpu_ids;) + x += (*({ do { const void *__vpp_verify = (typeof((&(memcg->vmstats_local->stat[idx])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(memcg->vmstats_local->stat[idx])))) *)((&(memcg->vmstats_local->stat[idx]))))); (typeof((typeof(*((&(memcg->vmstats_local->stat[idx])))) *)((&(memcg->vmstats_local->stat[idx]))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); + + if (x < 0) + x = 0; + + return x; +} + +void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mod_memcg_state(struct mem_cgroup *memcg, + int idx, int val) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + __mod_memcg_state(memcg, idx, val); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} +# 630 "./include/linux/memcontrol.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __mod_memcg_page_state(struct page *page, + int idx, int val) +{ + if (page->mem_cgroup) + __mod_memcg_state(page->mem_cgroup, idx, val); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mod_memcg_page_state(struct page *page, + int idx, int val) +{ + if (page->mem_cgroup) + mod_memcg_state(page->mem_cgroup, idx, val); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long lruvec_page_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + struct mem_cgroup_per_node *pn; + long x; + + if (mem_cgroup_disabled()) + return node_page_state(lruvec_pgdat(lruvec), idx); + + pn = ({ void *__mptr = (void *)(lruvec); do { extern void __compiletime_assert_1303(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(lruvec)), typeof(((struct mem_cgroup_per_node *)0)->lruvec)) && !__builtin_types_compatible_p(typeof(*(lruvec)), typeof(void))))) __compiletime_assert_1303(); } while (0); ((struct mem_cgroup_per_node *)(__mptr - __builtin_offsetof(struct mem_cgroup_per_node, lruvec))); }); + x = atomic_long_read(&pn->lruvec_stat[idx]); + + if (x < 0) + x = 0; + + return x; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long lruvec_page_state_local(struct lruvec *lruvec, + enum node_stat_item idx) +{ + struct mem_cgroup_per_node *pn; + long x = 0; + int cpu; + + if (mem_cgroup_disabled()) + return node_page_state(lruvec_pgdat(lruvec), idx); + + pn = ({ void *__mptr = (void *)(lruvec); do { extern void __compiletime_assert_1304(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if 
(!(!(!__builtin_types_compatible_p(typeof(*(lruvec)), typeof(((struct mem_cgroup_per_node *)0)->lruvec)) && !__builtin_types_compatible_p(typeof(*(lruvec)), typeof(void))))) __compiletime_assert_1304(); } while (0); ((struct mem_cgroup_per_node *)(__mptr - __builtin_offsetof(struct mem_cgroup_per_node, lruvec))); }); + for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (((const struct cpumask *)&__cpu_possible_mask))), ((cpu)) < nr_cpu_ids;) + x += (*({ do { const void *__vpp_verify = (typeof((&(pn->lruvec_stat_local->count[idx])) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&(pn->lruvec_stat_local->count[idx])))) *)((&(pn->lruvec_stat_local->count[idx]))))); (typeof((typeof(*((&(pn->lruvec_stat_local->count[idx])))) *)((&(pn->lruvec_stat_local->count[idx]))))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })); + + if (x < 0) + x = 0; + + return x; +} + +void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + int val); +void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); +void mod_memcg_obj_state(void *p, int idx, int val); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mod_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + __mod_lruvec_state(lruvec, idx, val); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + struct page *head = compound_head(page); + pg_data_t *pgdat = page_pgdat(page); + struct lruvec *lruvec; + + + if (!head->mem_cgroup) { + __mod_node_page_state(pgdat, idx, val); + return; + } + + lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat); + __mod_lruvec_state(lruvec, idx, val); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mod_lruvec_page_state(struct page *page, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + __mod_lruvec_page_state(page, idx, val); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} + +unsigned long 
mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned); + +void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, + unsigned long count); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void count_memcg_events(struct mem_cgroup *memcg, + enum vm_event_item idx, + unsigned long count) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + __count_memcg_events(memcg, idx, count); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void count_memcg_page_event(struct page *page, + enum vm_event_item idx) +{ + if (page->mem_cgroup) + count_memcg_events(page->mem_cgroup, idx, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void count_memcg_event_mm(struct mm_struct *mm, + enum vm_event_item idx) +{ + struct mem_cgroup *memcg; + + if (mem_cgroup_disabled()) + return; + + rcu_read_lock(); + memcg = mem_cgroup_from_task(({ typeof(*(mm->owner)) *________p1 = (typeof(*(mm->owner)) *)({ do { extern void __compiletime_assert_1305(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((mm->owner)) == sizeof(char) || sizeof((mm->owner)) == sizeof(short) || sizeof((mm->owner)) == sizeof(int) || sizeof((mm->owner)) == sizeof(long)) || sizeof((mm->owner)) == sizeof(long long))) __compiletime_assert_1305(); } while (0); ({ typeof( _Generic(((mm->owner)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((mm->owner)))) __x = (*(const volatile typeof( _Generic(((mm->owner)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((mm->owner)))) *)&((mm->owner))); do { } while (0); (typeof((mm->owner)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/memcontrol.h", 758, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(mm->owner)) *)(________p1)); })); + if (__builtin_expect(!!(memcg), 1)) + count_memcg_events(memcg, idx, 1); + rcu_read_unlock(); +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memcg_memory_event(struct mem_cgroup *memcg, + enum memcg_memory_event event) +{ + atomic_long_inc(&memcg->memory_events_local[event]); + cgroup_file_notify(&memcg->events_local_file); + + do { + atomic_long_inc(&memcg->memory_events[event]); + cgroup_file_notify(&memcg->events_file); + + if (!({ bool branch; if (__builtin_types_compatible_p(typeof(*&memory_cgrp_subsys_on_dfl_key), struct static_key_true)) branch = !arch_static_branch(&(&memory_cgrp_subsys_on_dfl_key)->key, true); else if (__builtin_types_compatible_p(typeof(*&memory_cgrp_subsys_on_dfl_key), struct static_key_false)) branch = !arch_static_branch_jump(&(&memory_cgrp_subsys_on_dfl_key)->key, true); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 1); })) + break; + if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) + break; + } while ((memcg = parent_mem_cgroup(memcg)) && + !mem_cgroup_is_root(memcg)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memcg_memory_event_mm(struct mm_struct *mm, + enum memcg_memory_event event) +{ + struct mem_cgroup *memcg; + + if (mem_cgroup_disabled()) + return; + + rcu_read_lock(); + memcg = mem_cgroup_from_task(({ typeof(*(mm->owner)) *________p1 = (typeof(*(mm->owner)) *)({ do { extern void __compiletime_assert_1306(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((mm->owner)) == sizeof(char) || sizeof((mm->owner)) == sizeof(short) || sizeof((mm->owner)) == sizeof(int) || sizeof((mm->owner)) == sizeof(long)) || sizeof((mm->owner)) == sizeof(long long))) __compiletime_assert_1306(); } while (0); ({ typeof( _Generic(((mm->owner)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((mm->owner)))) __x = (*(const volatile typeof( _Generic(((mm->owner)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((mm->owner)))) *)&((mm->owner))); do { } while (0); (typeof((mm->owner)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/memcontrol.h", 791, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(mm->owner)) *)(________p1)); })); + if (__builtin_expect(!!(memcg), 1)) + memcg_memory_event(memcg, event); + rcu_read_unlock(); +} + + +void mem_cgroup_split_huge_fixup(struct page *head); +# 1132 "./include/linux/memcontrol.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __inc_memcg_state(struct mem_cgroup *memcg, + int idx) +{ + __mod_memcg_state(memcg, idx, 1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
void __dec_memcg_state(struct mem_cgroup *memcg, + int idx) +{ + __mod_memcg_state(memcg, idx, -1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __inc_memcg_page_state(struct page *page, + int idx) +{ + __mod_memcg_page_state(page, idx, 1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __dec_memcg_page_state(struct page *page, + int idx) +{ + __mod_memcg_page_state(page, idx, -1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __inc_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + __mod_lruvec_state(lruvec, idx, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __dec_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + __mod_lruvec_state(lruvec, idx, -1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __inc_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + __mod_lruvec_page_state(page, idx, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __dec_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + __mod_lruvec_page_state(page, idx, -1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __inc_lruvec_slab_state(void *p, enum node_stat_item idx) +{ + __mod_lruvec_slab_state(p, idx, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __dec_lruvec_slab_state(void *p, enum node_stat_item idx) +{ + __mod_lruvec_slab_state(p, idx, -1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inc_memcg_state(struct mem_cgroup *memcg, + int idx) +{ + mod_memcg_state(memcg, idx, 1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dec_memcg_state(struct mem_cgroup *memcg, + int idx) +{ + mod_memcg_state(memcg, idx, -1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inc_memcg_page_state(struct page *page, + int idx) +{ + mod_memcg_page_state(page, idx, 1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dec_memcg_page_state(struct page *page, + int idx) +{ + mod_memcg_page_state(page, idx, -1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inc_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + mod_lruvec_state(lruvec, idx, 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dec_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) +{ + mod_lruvec_state(lruvec, idx, -1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void inc_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + mod_lruvec_page_state(page, idx, 1); +} + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dec_lruvec_page_state(struct page *page, + enum node_stat_item idx) +{ + mod_lruvec_page_state(page, idx, -1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct lruvec *parent_lruvec(struct lruvec *lruvec) +{ + struct mem_cgroup *memcg; + + memcg = lruvec_memcg(lruvec); + if (!memcg) + return ((void *)0); + memcg = parent_mem_cgroup(memcg); + if (!memcg) + return ((void *)0); + return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); +} + + + +struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback); + +void mem_cgroup_track_foreign_dirty_slowpath(struct page *page, + struct bdi_writeback *wb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mem_cgroup_track_foreign_dirty(struct page *page, + struct bdi_writeback *wb) +{ + if (mem_cgroup_disabled()) + return; + + if (__builtin_expect(!!(&page->mem_cgroup->css != wb->memcg_css), 0)) + mem_cgroup_track_foreign_dirty_slowpath(page, wb); +} + +void mem_cgroup_flush_foreign(struct bdi_writeback *wb); +# 1306 "./include/linux/memcontrol.h" +struct sock; +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); + +extern struct static_key_false memcg_sockets_enabled_key; + +void mem_cgroup_sk_alloc(struct sock *sk); +void mem_cgroup_sk_free(struct sock *sk); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) +{ + if (!({ bool branch; if (__builtin_types_compatible_p(typeof(*&memory_cgrp_subsys_on_dfl_key), struct static_key_true)) branch = !arch_static_branch(&(&memory_cgrp_subsys_on_dfl_key)->key, true); else if (__builtin_types_compatible_p(typeof(*&memory_cgrp_subsys_on_dfl_key), struct static_key_false)) branch = !arch_static_branch_jump(&(&memory_cgrp_subsys_on_dfl_key)->key, true); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 1); }) && memcg->tcpmem_pressure) + return true; + do { + if ((({ unsigned long __dummy; typeof(memcg->socket_pressure) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(jiffies) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)((jiffies) - (memcg->socket_pressure)) < 0))) + return true; + } while ((memcg = parent_mem_cgroup(memcg))); + return false; +} + +extern int memcg_expand_shrinker_maps(int new_id); + +extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id); +# 1344 "./include/linux/memcontrol.h" +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); +void memcg_kmem_put_cache(struct kmem_cache *cachep); + + +int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, + unsigned int nr_pages); +void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages); +int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); +void __memcg_kmem_uncharge_page(struct page *page, int order); + +extern struct static_key_false memcg_kmem_enabled_key; +extern struct workqueue_struct *memcg_kmem_cache_wq; + +extern int memcg_nr_cache_ids; +void 
memcg_get_cache_ids(void); +void memcg_put_cache_ids(void); +# 1369 "./include/linux/memcontrol.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool memcg_kmem_enabled(void) +{ + return ({ bool branch; if (__builtin_types_compatible_p(typeof(*&memcg_kmem_enabled_key), struct static_key_true)) branch = arch_static_branch_jump(&(&memcg_kmem_enabled_key)->key, false); else if (__builtin_types_compatible_p(typeof(*&memcg_kmem_enabled_key), struct static_key_false)) branch = arch_static_branch(&(&memcg_kmem_enabled_key)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int memcg_kmem_charge_page(struct page *page, gfp_t gfp, + int order) +{ + if (memcg_kmem_enabled()) + return __memcg_kmem_charge_page(page, gfp, order); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memcg_kmem_uncharge_page(struct page *page, int order) +{ + if (memcg_kmem_enabled()) + __memcg_kmem_uncharge_page(page, order); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, + unsigned int nr_pages) +{ + if (memcg_kmem_enabled()) + return __memcg_kmem_charge(memcg, gfp, nr_pages); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memcg_kmem_uncharge(struct mem_cgroup *memcg, + unsigned int nr_pages) +{ + if (memcg_kmem_enabled()) + __memcg_kmem_uncharge(memcg, nr_pages); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int memcg_cache_id(struct mem_cgroup *memcg) +{ + return memcg ? 
memcg->kmemcg_id : -1; +} + +struct mem_cgroup *mem_cgroup_from_obj(void *p); +# 54 "./include/net/sock.h" 2 +# 1 "./include/linux/static_key.h" 1 +# 55 "./include/net/sock.h" 2 + + + + +# 1 "./include/linux/filter.h" 1 +# 19 "./include/linux/filter.h" +# 1 "./include/linux/set_memory.h" 1 +# 9 "./include/linux/set_memory.h" +# 1 "./arch/x86/include/asm/set_memory.h" 1 + + + + + +# 1 "./include/asm-generic/set_memory.h" 1 + + + + + + + +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); +# 7 "./arch/x86/include/asm/set_memory.h" 2 +# 37 "./arch/x86/include/asm/set_memory.h" +int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot); +int _set_memory_uc(unsigned long addr, int numpages); +int _set_memory_wc(unsigned long addr, int numpages); +int _set_memory_wt(unsigned long addr, int numpages); +int _set_memory_wb(unsigned long addr, int numpages); +int set_memory_uc(unsigned long addr, int numpages); +int set_memory_wc(unsigned long addr, int numpages); +int set_memory_wb(unsigned long addr, int numpages); +int set_memory_np(unsigned long addr, int numpages); +int set_memory_4k(unsigned long addr, int numpages); +int set_memory_encrypted(unsigned long addr, int numpages); +int set_memory_decrypted(unsigned long addr, int numpages); +int set_memory_np_noalias(unsigned long addr, int numpages); +int set_memory_nonglobal(unsigned long addr, int numpages); +int set_memory_global(unsigned long addr, int numpages); + +int set_pages_array_uc(struct page **pages, int addrinarray); +int set_pages_array_wc(struct page **pages, int addrinarray); +int set_pages_array_wt(struct page **pages, int addrinarray); +int set_pages_array_wb(struct page **pages, int addrinarray); +# 78 "./arch/x86/include/asm/set_memory.h" +int set_pages_uc(struct page *page, int numpages); +int set_pages_wb(struct page *page, int numpages); +int set_pages_ro(struct page *page, int numpages); +int set_pages_rw(struct page *page, int numpages); + +int set_direct_map_invalid_noflush(struct page *page); +int set_direct_map_default_noflush(struct page *page); + +extern int kernel_set_to_readonly; +# 95 "./arch/x86/include/asm/set_memory.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int set_mce_nospec(unsigned long pfn, bool unmap) +{ + unsigned long decoy_addr; + int rc; +# 112 "./arch/x86/include/asm/set_memory.h" + decoy_addr = (pfn << 12) + (((unsigned long)page_offset_base) ^ ((((1UL))) << (63))); + + if (unmap) + rc = set_memory_np(decoy_addr, 1); + else + rc = set_memory_uc(decoy_addr, 1); + if (rc) + printk("\001" "4" "Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); + return rc; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int clear_mce_nospec(unsigned long pfn) +{ + return set_memory_wb((unsigned long) ((void *)((unsigned long)((pfn) << 12)+((unsigned long)page_offset_base))), 1); +} +# 10 "./include/linux/set_memory.h" 2 +# 20 "./include/linux/filter.h" 2 + +# 1 "./include/linux/if_vlan.h" 1 +# 11 "./include/linux/if_vlan.h" +# 1 "./include/linux/etherdevice.h" 1 +# 23 "./include/linux/etherdevice.h" +# 1 "./include/linux/crc32.h" 1 +# 9 "./include/linux/crc32.h" +# 1 "./include/linux/bitrev.h" 1 +# 15 "./include/linux/bitrev.h" +extern u8 const byte_rev_table[256]; +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u8 __bitrev8(u8 byte) +{ + return byte_rev_table[byte]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 __bitrev16(u16 x) +{ + return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __bitrev32(u32 x) +{ + return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16); +} +# 10 "./include/linux/crc32.h" 2 + +u32 __attribute__((__pure__)) crc32_le(u32 crc, unsigned char const *p, size_t len); +u32 __attribute__((__pure__)) crc32_be(u32 crc, unsigned char const *p, size_t len); +# 32 "./include/linux/crc32.h" +u32 __attribute__((__const__)) crc32_le_shift(u32 crc, size_t len); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) +{ + return crc32_le_shift(crc1, len2) ^ crc2; +} + +u32 __attribute__((__pure__)) __crc32c_le(u32 crc, unsigned char const *p, size_t len); +# 59 "./include/linux/crc32.h" +u32 __attribute__((__const__)) __crc32c_le_shift(u32 crc, size_t len); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2) +{ + return __crc32c_le_shift(crc1, len2) ^ crc2; +} +# 24 "./include/linux/etherdevice.h" 2 +# 1 "./arch/x86/include/asm/unaligned.h" 1 +# 9 "./arch/x86/include/asm/unaligned.h" +# 1 "./include/linux/unaligned/access_ok.h" 1 + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u16 get_unaligned_le16(const void *p) +{ + return __le16_to_cpup((__le16 *)p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u32 get_unaligned_le32(const void *p) +{ + return __le32_to_cpup((__le32 *)p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u64 get_unaligned_le64(const void *p) +{ + return __le64_to_cpup((__le64 *)p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u16 get_unaligned_be16(const void *p) +{ + return __be16_to_cpup((__be16 *)p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u32 get_unaligned_be32(const void *p) +{ + return __be32_to_cpup((__be32 *)p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u64 get_unaligned_be64(const void *p) +{ + return __be64_to_cpup((__be64 *)p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void put_unaligned_le16(u16 val, void *p) +{ + *((__le16 *)p) = (( __le16)(__u16)(val)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void put_unaligned_le32(u32 val, void *p) +{ + 
*((__le32 *)p) = (( __le32)(__u32)(val)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void put_unaligned_le64(u64 val, void *p) +{ + *((__le64 *)p) = (( __le64)(__u64)(val)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void put_unaligned_be16(u16 val, void *p) +{ + *((__be16 *)p) = (( __be16)(__u16)__builtin_bswap16((__u16)((val)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void put_unaligned_be32(u32 val, void *p) +{ + *((__be32 *)p) = (( __be32)(__u32)__builtin_bswap32((__u32)((val)))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void put_unaligned_be64(u64 val, void *p) +{ + *((__be64 *)p) = (( __be64)(__u64)__builtin_bswap64((__u64)((val)))); +} +# 10 "./arch/x86/include/asm/unaligned.h" 2 +# 1 "./include/linux/unaligned/generic.h" 1 +# 11 "./include/linux/unaligned/generic.h" +extern void __bad_unaligned_access_size(void); +# 71 "./include/linux/unaligned/generic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __get_unaligned_be24(const u8 *p) +{ + return p[0] << 16 | p[1] << 8 | p[2]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 get_unaligned_be24(const void *p) +{ + return __get_unaligned_be24(p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __get_unaligned_le24(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 get_unaligned_le24(const void *p) +{ + return __get_unaligned_le24(p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __put_unaligned_be24(const u32 val, u8 *p) +{ + *p++ = val >> 16; + *p++ = val >> 8; + *p++ = val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_unaligned_be24(const u32 val, void *p) +{ + __put_unaligned_be24(val, p); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __put_unaligned_le24(const u32 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; + *p++ = val >> 16; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void put_unaligned_le24(const u32 val, void *p) +{ + __put_unaligned_le24(val, p); +} +# 11 "./arch/x86/include/asm/unaligned.h" 2 +# 25 "./include/linux/etherdevice.h" 2 + + + +struct device; +int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); +unsigned char *arch_get_platform_mac_address(void); +int nvmem_get_mac_address(struct device *dev, void *addrbuf); +u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len); +__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); +extern const struct header_ops eth_header_ops; + +int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, + 
const void *daddr, const void *saddr, unsigned len); +int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); +int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, + __be16 type); +void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, + const unsigned char *haddr); +__be16 eth_header_parse_protocol(const struct sk_buff *skb); +int eth_prepare_mac_addr_change(struct net_device *dev, void *p); +void eth_commit_mac_addr_change(struct net_device *dev, void *p); +int eth_mac_addr(struct net_device *dev, void *p); +int eth_validate_addr(struct net_device *dev); + +struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, + unsigned int rxqs); + + + +struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, + unsigned int txqs, + unsigned int rxqs); + + +struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb); +int eth_gro_complete(struct sk_buff *skb, int nhoff); + + +static const u8 eth_reserved_addr_base[6] __attribute__((__aligned__(2))) = +{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +# 76 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = (( __be16)(__u16)__builtin_bswap16((__u16)((0xfff0)))); + + + return (((*(const u32 *)addr) ^ (*(const u32 *)b)) | + ( int)((a[2] ^ b[2]) & m)) == 0; + + + +} +# 98 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_zero_ether_addr(const u8 *addr) +{ + + return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0; + + + + + +} +# 116 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_multicast_ether_addr(const u8 *addr) +{ + + u32 a = *(const u32 *)addr; + + + + + + + return 0x01 & a; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_multicast_ether_addr_64bits(const u8 addr[6+2]) +{ + + + + + return 0x01 & (*(const u64 *)addr); + + + + +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_local_ether_addr(const u8 *addr) +{ + return 0x02 & addr[0]; +} +# 162 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_broadcast_ether_addr(const u8 *addr) +{ + return (*(const u16 *)(addr + 0) & + *(const u16 *)(addr + 2) & + *(const u16 *)(addr + 4)) == 0xffff; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_unicast_ether_addr(const u8 *addr) +{ + return !is_multicast_ether_addr(addr); +} +# 191 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_valid_ether_addr(const u8 *addr) +{ + + + return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); +} +# 206 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool 
eth_proto_is_802_3(__be16 proto) +{ + + + proto &= (( __be16)(__u16)__builtin_bswap16((__u16)((0xFF00)))); + + + return ( u16)proto >= ( u16)(( __be16)(__u16)__builtin_bswap16((__u16)((0x0600)))); +} +# 223 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, 6); + addr[0] &= 0xfe; + addr[0] |= 0x02; +} +# 238 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, 6); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eth_zero_addr(u8 *addr) +{ + memset(addr, 0x00, 6); +} +# 263 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eth_hw_addr_random(struct net_device *dev) +{ + dev->addr_assign_type = 1; + eth_random_addr(dev->dev_addr); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 eth_hw_addr_crc(struct netdev_hw_addr *ha) +{ + return ({ u32 __x = crc32_le(~0, ha->addr, 6); __builtin_constant_p(__x) ? ({ u32 ___x = __x; ___x = (___x >> 16) | (___x << 16); ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); ___x; }) : __bitrev32(__x); }); +} +# 287 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ether_addr_copy(u8 *dst, const u8 *src) +{ + + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +# 300 "./include/linux/etherdevice.h" +} +# 310 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eth_hw_addr_inherit(struct net_device *dst, + struct net_device *src) +{ + dst->addr_assign_type = src->addr_assign_type; + ether_addr_copy(dst->dev_addr, src->dev_addr); +} +# 326 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + + u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) | + ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4))); + + return fold == 0; + + + + + + +} +# 355 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ether_addr_equal_64bits(const u8 addr1[6+2], + const u8 addr2[6+2]) +{ + + u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2); + + + + + return (fold << 16) == 0; + + + + +} +# 380 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2) +{ + + return ether_addr_equal(addr1, addr2); + + + +} +# 399 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, + const u8 *mask) +{ + int i; + + for (i = 0; i < 6; i++) { + if ((addr1[i] ^ addr2[i]) & mask[i]) + return false; + } + + return true; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 ether_addr_to_u64(const u8 *addr) +{ + u64 u = 0; + int i; + + for (i = 0; i < 6; i++) + u = u << 8 | addr[i]; + + return u; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void u64_to_ether_addr(u64 u, u8 *addr) +{ + int i; + + for (i = 6 - 1; i >= 0; i--) { + addr[i] = u & 0xff; + u = u >> 8; + } +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eth_addr_dec(u8 *addr) +{ + u64 u = ether_addr_to_u64(addr); + + u--; + u64_to_ether_addr(u, addr); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eth_addr_inc(u8 *addr) +{ + u64 u = ether_addr_to_u64(addr); + + u++; + u64_to_ether_addr(u, addr); +} +# 480 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_etherdev_addr(const struct net_device *dev, + const u8 addr[6 + 2]) +{ + struct netdev_hw_addr *ha; + bool res = false; + + rcu_read_lock(); + for (({ ; do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!(0) && !rcu_read_lock_any_held())) { __warned = true; lockdep_rcu_suspicious("include/linux/etherdevice.h", 487, "RCU-list traversed in non-reader section!"); } } while (0); }), ha = ({ void *__mptr = (void *)(({ do { extern void __compiletime_assert_1307(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&dev->dev_addrs.list)->next) == sizeof(char) || sizeof((&dev->dev_addrs.list)->next) == sizeof(short) || sizeof((&dev->dev_addrs.list)->next) == sizeof(int) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long)) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long long))) __compiletime_assert_1307(); } while (0); ({ typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) __x = (*(const volatile typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) *)&((&dev->dev_addrs.list)->next)); do { } while (0); (typeof((&dev->dev_addrs.list)->next))__x; }); })); do { extern void __compiletime_assert_1308(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(({ do { 
extern void __compiletime_assert_1307(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&dev->dev_addrs.list)->next) == sizeof(char) || sizeof((&dev->dev_addrs.list)->next) == sizeof(short) || sizeof((&dev->dev_addrs.list)->next) == sizeof(int) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long)) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long long))) __compiletime_assert_1307(); } while (0); ({ typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) __x = (*(const volatile typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) *)&((&dev->dev_addrs.list)->next)); do { } while (0); (typeof((&dev->dev_addrs.list)->next))__x; }); }))), typeof(((typeof(*ha) *)0)->list)) && !__builtin_types_compatible_p(typeof(*(({ do { extern void __compiletime_assert_1307(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&dev->dev_addrs.list)->next) == sizeof(char) || sizeof((&dev->dev_addrs.list)->next) == sizeof(short) || sizeof((&dev->dev_addrs.list)->next) == sizeof(int) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long)) || sizeof((&dev->dev_addrs.list)->next) == sizeof(long long))) __compiletime_assert_1307(); } while (0); ({ typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) __x = (*(const volatile typeof( _Generic(((&dev->dev_addrs.list)->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&dev->dev_addrs.list)->next))) *)&((&dev->dev_addrs.list)->next)); do { } while (0); (typeof((&dev->dev_addrs.list)->next))__x; }); }))), typeof(void))))) __compiletime_assert_1308(); } while (0); ((typeof(*ha) *)(__mptr - __builtin_offsetof(typeof(*ha), list))); }); &ha->list != (&dev->dev_addrs.list); ha = ({ void *__mptr = (void *)(({ do { extern void __compiletime_assert_1309(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ha->list.next) == sizeof(char) || sizeof(ha->list.next) == sizeof(short) || sizeof(ha->list.next) == sizeof(int) || sizeof(ha->list.next) == sizeof(long)) || sizeof(ha->list.next) == sizeof(long 
long))) __compiletime_assert_1309(); } while (0); ({ typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) __x = (*(const volatile typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) *)&(ha->list.next)); do { } while (0); (typeof(ha->list.next))__x; }); })); do { extern void __compiletime_assert_1310(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(({ do { extern void __compiletime_assert_1309(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ha->list.next) == sizeof(char) || sizeof(ha->list.next) == sizeof(short) || sizeof(ha->list.next) == sizeof(int) || sizeof(ha->list.next) == sizeof(long)) || sizeof(ha->list.next) == sizeof(long long))) __compiletime_assert_1309(); } while (0); ({ typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) __x = (*(const volatile typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) *)&(ha->list.next)); do { } while (0); (typeof(ha->list.next))__x; }); }))), typeof(((typeof(*ha) *)0)->list)) && !__builtin_types_compatible_p(typeof(*(({ do { extern void __compiletime_assert_1309(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ha->list.next) == sizeof(char) || sizeof(ha->list.next) == sizeof(short) || sizeof(ha->list.next) == sizeof(int) || sizeof(ha->list.next) == sizeof(long)) || sizeof(ha->list.next) == sizeof(long long))) __compiletime_assert_1309(); } while (0); ({ typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) __x = (*(const volatile typeof( _Generic((ha->list.next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed 
int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ha->list.next))) *)&(ha->list.next)); do { } while (0); (typeof(ha->list.next))__x; }); }))), typeof(void))))) __compiletime_assert_1310(); } while (0); ((typeof(*ha) *)(__mptr - __builtin_offsetof(typeof(*ha), list))); })) { + res = ether_addr_equal_64bits(addr, ha->addr); + if (res) + break; + } + rcu_read_unlock(); + return res; +} +# 509 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long compare_ether_header(const void *a, const void *b) +{ + + unsigned long fold; +# 522 "./include/linux/etherdevice.h" + fold = *(unsigned long *)a ^ *(unsigned long *)b; + fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6); + return fold; + + + + + + + +} +# 541 "./include/linux/etherdevice.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int eth_skb_pad(struct sk_buff *skb) +{ + return skb_put_padto(skb, 60); +} +# 12 "./include/linux/if_vlan.h" 2 +# 1 "./include/linux/rtnetlink.h" 1 +# 10 "./include/linux/rtnetlink.h" +# 1 "./include/uapi/linux/rtnetlink.h" 1 + + + + + + + +# 1 "./include/uapi/linux/if_addr.h" 1 + + + + + + + +struct ifaddrmsg { + __u8 ifa_family; + __u8 ifa_prefixlen; + __u8 ifa_flags; + __u8 ifa_scope; + __u32 ifa_index; +}; +# 26 "./include/uapi/linux/if_addr.h" +enum { + IFA_UNSPEC, + IFA_ADDRESS, + IFA_LOCAL, + IFA_LABEL, + IFA_BROADCAST, + IFA_ANYCAST, + IFA_CACHEINFO, + IFA_MULTICAST, + IFA_FLAGS, + IFA_RT_PRIORITY, + IFA_TARGET_NETNSID, + __IFA_MAX, +}; +# 59 "./include/uapi/linux/if_addr.h" +struct ifa_cacheinfo { + __u32 ifa_prefered; + __u32 ifa_valid; + __u32 cstamp; + __u32 tstamp; +}; +# 9 "./include/uapi/linux/rtnetlink.h" 2 +# 24 "./include/uapi/linux/rtnetlink.h" +enum { + RTM_BASE = 16, + + + RTM_NEWLINK = 16, + + RTM_DELLINK, + + RTM_GETLINK, + + RTM_SETLINK, + + + RTM_NEWADDR = 20, + + RTM_DELADDR, + + RTM_GETADDR, + + + RTM_NEWROUTE = 24, + + RTM_DELROUTE, + + RTM_GETROUTE, + + + RTM_NEWNEIGH = 28, + + RTM_DELNEIGH, + + RTM_GETNEIGH, + + + RTM_NEWRULE = 32, + + RTM_DELRULE, + + RTM_GETRULE, + + + RTM_NEWQDISC = 36, + + RTM_DELQDISC, + + RTM_GETQDISC, + + + RTM_NEWTCLASS = 40, + + RTM_DELTCLASS, + + RTM_GETTCLASS, + + + RTM_NEWTFILTER = 44, + + RTM_DELTFILTER, + + RTM_GETTFILTER, + + + RTM_NEWACTION = 48, + + RTM_DELACTION, + + RTM_GETACTION, + + + RTM_NEWPREFIX = 52, + + + RTM_GETMULTICAST = 58, + + + RTM_GETANYCAST = 62, + + + RTM_NEWNEIGHTBL = 64, + + RTM_GETNEIGHTBL = 66, + + RTM_SETNEIGHTBL, + + + RTM_NEWNDUSEROPT = 68, + + + RTM_NEWADDRLABEL = 72, + + RTM_DELADDRLABEL, + + RTM_GETADDRLABEL, + + + RTM_GETDCB = 78, + + RTM_SETDCB, + + + RTM_NEWNETCONF = 80, + + RTM_DELNETCONF, + + RTM_GETNETCONF = 82, + + + RTM_NEWMDB = 84, + + RTM_DELMDB = 85, + + RTM_GETMDB = 86, + + + RTM_NEWNSID = 88, + + RTM_DELNSID = 89, + + RTM_GETNSID = 90, + + + RTM_NEWSTATS = 92, + + RTM_GETSTATS = 94, + + + RTM_NEWCACHEREPORT = 96, + + + RTM_NEWCHAIN = 100, + + RTM_DELCHAIN, + + RTM_GETCHAIN, + + + RTM_NEWNEXTHOP = 104, + + RTM_DELNEXTHOP, + + RTM_GETNEXTHOP, + + + RTM_NEWLINKPROP = 108, + + RTM_DELLINKPROP, + + RTM_GETLINKPROP, + + + RTM_NEWVLAN = 112, + + RTM_DELVLAN, + + RTM_GETVLAN, + + + __RTM_MAX, + +}; +# 195 "./include/uapi/linux/rtnetlink.h" +struct rtattr { + unsigned short rta_len; + unsigned short rta_type; +}; +# 
221 "./include/uapi/linux/rtnetlink.h" +struct rtmsg { + unsigned char rtm_family; + unsigned char rtm_dst_len; + unsigned char rtm_src_len; + unsigned char rtm_tos; + + unsigned char rtm_table; + unsigned char rtm_protocol; + unsigned char rtm_scope; + unsigned char rtm_type; + + unsigned rtm_flags; +}; + + + +enum { + RTN_UNSPEC, + RTN_UNICAST, + RTN_LOCAL, + RTN_BROADCAST, + + RTN_ANYCAST, + + RTN_MULTICAST, + RTN_BLACKHOLE, + RTN_UNREACHABLE, + RTN_PROHIBIT, + RTN_THROW, + RTN_NAT, + RTN_XRESOLVE, + __RTN_MAX +}; +# 302 "./include/uapi/linux/rtnetlink.h" +enum rt_scope_t { + RT_SCOPE_UNIVERSE=0, + + RT_SCOPE_SITE=200, + RT_SCOPE_LINK=253, + RT_SCOPE_HOST=254, + RT_SCOPE_NOWHERE=255 +}; +# 324 "./include/uapi/linux/rtnetlink.h" +enum rt_class_t { + RT_TABLE_UNSPEC=0, + + RT_TABLE_COMPAT=252, + RT_TABLE_DEFAULT=253, + RT_TABLE_MAIN=254, + RT_TABLE_LOCAL=255, + RT_TABLE_MAX=0xFFFFFFFF +}; + + + + +enum rtattr_type_t { + RTA_UNSPEC, + RTA_DST, + RTA_SRC, + RTA_IIF, + RTA_OIF, + RTA_GATEWAY, + RTA_PRIORITY, + RTA_PREFSRC, + RTA_METRICS, + RTA_MULTIPATH, + RTA_PROTOINFO, + RTA_FLOW, + RTA_CACHEINFO, + RTA_SESSION, + RTA_MP_ALGO, + RTA_TABLE, + RTA_MARK, + RTA_MFC_STATS, + RTA_VIA, + RTA_NEWDST, + RTA_PREF, + RTA_ENCAP_TYPE, + RTA_ENCAP, + RTA_EXPIRES, + RTA_PAD, + RTA_UID, + RTA_TTL_PROPAGATE, + RTA_IP_PROTO, + RTA_SPORT, + RTA_DPORT, + RTA_NH_ID, + __RTA_MAX +}; +# 386 "./include/uapi/linux/rtnetlink.h" +struct rtnexthop { + unsigned short rtnh_len; + unsigned char rtnh_flags; + unsigned char rtnh_hops; + int rtnh_ifindex; +}; +# 416 "./include/uapi/linux/rtnetlink.h" +struct rtvia { + __kernel_sa_family_t rtvia_family; + __u8 rtvia_addr[0]; +}; + + + +struct rta_cacheinfo { + __u32 rta_clntref; + __u32 rta_lastuse; + __s32 rta_expires; + __u32 rta_error; + __u32 rta_used; + + + __u32 rta_id; + __u32 rta_ts; + __u32 rta_tsage; +}; + + + +enum { + RTAX_UNSPEC, + + RTAX_LOCK, + + RTAX_MTU, + + RTAX_WINDOW, + + RTAX_RTT, + + RTAX_RTTVAR, + + RTAX_SSTHRESH, + + RTAX_CWND, + + RTAX_ADVMSS, + + RTAX_REORDERING, + + RTAX_HOPLIMIT, + + RTAX_INITCWND, + + RTAX_FEATURES, + + RTAX_RTO_MIN, + + RTAX_INITRWND, + + RTAX_QUICKACK, + + RTAX_CC_ALGO, + + RTAX_FASTOPEN_NO_COOKIE, + + __RTAX_MAX +}; +# 488 "./include/uapi/linux/rtnetlink.h" +struct rta_session { + __u8 proto; + __u8 pad1; + __u16 pad2; + + union { + struct { + __u16 sport; + __u16 dport; + } ports; + + struct { + __u8 type; + __u8 code; + __u16 ident; + } icmpt; + + __u32 spi; + } u; +}; + +struct rta_mfc_stats { + __u64 mfcs_packets; + __u64 mfcs_bytes; + __u64 mfcs_wrong_if; +}; + + + + + +struct rtgenmsg { + unsigned char rtgen_family; +}; +# 532 "./include/uapi/linux/rtnetlink.h" +struct ifinfomsg { + unsigned char ifi_family; + unsigned char __ifi_pad; + unsigned short ifi_type; + int ifi_index; + unsigned ifi_flags; + unsigned ifi_change; +}; + + + + + +struct prefixmsg { + unsigned char prefix_family; + unsigned char prefix_pad1; + unsigned short prefix_pad2; + int prefix_ifindex; + unsigned char prefix_type; + unsigned char prefix_len; + unsigned char prefix_flags; + unsigned char prefix_pad3; +}; + +enum +{ + PREFIX_UNSPEC, + PREFIX_ADDRESS, + PREFIX_CACHEINFO, + __PREFIX_MAX +}; + + + +struct prefix_cacheinfo { + __u32 preferred_time; + __u32 valid_time; +}; + + + + + + +struct tcmsg { + unsigned char tcm_family; + unsigned char tcm__pad1; + unsigned short tcm__pad2; + int tcm_ifindex; + __u32 tcm_handle; + __u32 tcm_parent; + + + + + __u32 tcm_info; +}; + + + + + + + +enum { + TCA_UNSPEC, + TCA_KIND, + TCA_OPTIONS, + TCA_STATS, + 
TCA_XSTATS, + TCA_RATE, + TCA_FCNT, + TCA_STATS2, + TCA_STAB, + TCA_PAD, + TCA_DUMP_INVISIBLE, + TCA_CHAIN, + TCA_HW_OFFLOAD, + TCA_INGRESS_BLOCK, + TCA_EGRESS_BLOCK, + TCA_DUMP_FLAGS, + __TCA_MAX +}; +# 630 "./include/uapi/linux/rtnetlink.h" +struct nduseroptmsg { + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + unsigned short nduseropt_opts_len; + int nduseropt_ifindex; + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + unsigned short nduseropt_pad2; + unsigned int nduseropt_pad3; + +}; + +enum { + NDUSEROPT_UNSPEC, + NDUSEROPT_SRCADDR, + __NDUSEROPT_MAX +}; +# 674 "./include/uapi/linux/rtnetlink.h" +enum rtnetlink_groups { + RTNLGRP_NONE, + + RTNLGRP_LINK, + + RTNLGRP_NOTIFY, + + RTNLGRP_NEIGH, + + RTNLGRP_TC, + + RTNLGRP_IPV4_IFADDR, + + RTNLGRP_IPV4_MROUTE, + + RTNLGRP_IPV4_ROUTE, + + RTNLGRP_IPV4_RULE, + + RTNLGRP_IPV6_IFADDR, + + RTNLGRP_IPV6_MROUTE, + + RTNLGRP_IPV6_ROUTE, + + RTNLGRP_IPV6_IFINFO, + + RTNLGRP_DECnet_IFADDR, + + RTNLGRP_NOP2, + RTNLGRP_DECnet_ROUTE, + + RTNLGRP_DECnet_RULE, + + RTNLGRP_NOP4, + RTNLGRP_IPV6_PREFIX, + + RTNLGRP_IPV6_RULE, + + RTNLGRP_ND_USEROPT, + + RTNLGRP_PHONET_IFADDR, + + RTNLGRP_PHONET_ROUTE, + + RTNLGRP_DCB, + + RTNLGRP_IPV4_NETCONF, + + RTNLGRP_IPV6_NETCONF, + + RTNLGRP_MDB, + + RTNLGRP_MPLS_ROUTE, + + RTNLGRP_NSID, + + RTNLGRP_MPLS_NETCONF, + + RTNLGRP_IPV4_MROUTE_R, + + RTNLGRP_IPV6_MROUTE_R, + + RTNLGRP_NEXTHOP, + + RTNLGRP_BRVLAN, + + __RTNLGRP_MAX +}; + + + +struct tcamsg { + unsigned char tca_family; + unsigned char tca__pad1; + unsigned short tca__pad2; +}; + +enum { + TCA_ROOT_UNSPEC, + TCA_ROOT_TAB, + + + TCA_ROOT_FLAGS, + TCA_ROOT_COUNT, + TCA_ROOT_TIME_DELTA, + __TCA_ROOT_MAX, + +}; +# 11 "./include/linux/rtnetlink.h" 2 + +extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); +extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); +extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, + u32 group, struct nlmsghdr *nlh, gfp_t flags); +extern void rtnl_set_sk_err(struct net *net, u32 group, int error); +extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); +extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, + u32 id, long expires, u32 error); + +void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); +void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, + gfp_t flags, int *new_nsid, int new_ifindex); +struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, + unsigned change, u32 event, + gfp_t flags, int *new_nsid, + int new_ifindex); +void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, + gfp_t flags); + + + +extern void rtnl_lock(void); +extern void rtnl_unlock(void); +extern int rtnl_trylock(void); +extern int rtnl_is_locked(void); +extern int rtnl_lock_killable(void); +extern bool refcount_dec_and_rtnl_lock(refcount_t *r); + +extern wait_queue_head_t netdev_unregistering_wq; +extern struct rw_semaphore pernet_ops_rwsem; +extern struct rw_semaphore net_rwsem; + + +extern bool lockdep_rtnl_is_held(void); +# 83 "./include/linux/rtnetlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct netdev_queue *dev_ingress_queue(struct net_device *dev) +{ + return ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rtnl_is_held())))) { __warned = true; 
lockdep_rcu_suspicious("include/linux/rtnetlink.h", 85, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(dev->ingress_queue)) *)((dev->ingress_queue))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev) +{ + return ({ typeof(*(dev->ingress_queue)) *________p1 = (typeof(*(dev->ingress_queue)) *)({ do { extern void __compiletime_assert_1311(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((dev->ingress_queue)) == sizeof(char) || sizeof((dev->ingress_queue)) == sizeof(short) || sizeof((dev->ingress_queue)) == sizeof(int) || sizeof((dev->ingress_queue)) == sizeof(long)) || sizeof((dev->ingress_queue)) == sizeof(long long))) __compiletime_assert_1311(); } while (0); ({ typeof( _Generic(((dev->ingress_queue)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((dev->ingress_queue)))) __x = (*(const volatile typeof( _Generic(((dev->ingress_queue)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((dev->ingress_queue)))) *)&((dev->ingress_queue))); do { } while (0); (typeof((dev->ingress_queue)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rtnetlink.h", 90, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(dev->ingress_queue)) *)(________p1)); }); +} + +struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); + + +void net_inc_ingress_queue(void); +void net_dec_ingress_queue(void); + + + +void net_inc_egress_queue(void); +void net_dec_egress_queue(void); + + +void rtnetlink_init(void); +void __rtnl_unlock(void); +void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); + + + + + +extern int ndo_dflt_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, + int *idx); +extern int ndo_dflt_fdb_add(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, + u16 flags); +extern int ndo_dflt_fdb_del(struct ndmsg *ndm, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid); + +extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u16 mode, + u32 flags, u32 mask, int nlflags, + u32 filter_mask, + int (*vlan_fill)(struct sk_buff *skb, + struct net_device *dev, + u32 filter_mask)); +# 13 "./include/linux/if_vlan.h" 2 + +# 1 "./include/uapi/linux/if_vlan.h" 1 +# 21 "./include/uapi/linux/if_vlan.h" +enum vlan_ioctl_cmds { + ADD_VLAN_CMD, + DEL_VLAN_CMD, + SET_VLAN_INGRESS_PRIORITY_CMD, + SET_VLAN_EGRESS_PRIORITY_CMD, + GET_VLAN_INGRESS_PRIORITY_CMD, + GET_VLAN_EGRESS_PRIORITY_CMD, 
+ SET_VLAN_NAME_TYPE_CMD, + SET_VLAN_FLAG_CMD, + GET_VLAN_REALDEV_NAME_CMD, + GET_VLAN_VID_CMD +}; + +enum vlan_flags { + VLAN_FLAG_REORDER_HDR = 0x1, + VLAN_FLAG_GVRP = 0x2, + VLAN_FLAG_LOOSE_BINDING = 0x4, + VLAN_FLAG_MVRP = 0x8, + VLAN_FLAG_BRIDGE_BINDING = 0x10, +}; + +enum vlan_name_types { + VLAN_NAME_TYPE_PLUS_VID, + VLAN_NAME_TYPE_RAW_PLUS_VID, + VLAN_NAME_TYPE_PLUS_VID_NO_PAD, + VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD, + VLAN_NAME_TYPE_HIGHEST +}; + +struct vlan_ioctl_args { + int cmd; + char device1[24]; + + union { + char device2[24]; + int VID; + unsigned int skb_priority; + unsigned int name_type; + unsigned int bind_type; + unsigned int flag; + } u; + + short vlan_qos; +}; +# 15 "./include/linux/if_vlan.h" 2 +# 33 "./include/linux/if_vlan.h" +struct vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +# 46 "./include/linux/if_vlan.h" +struct vlan_ethhdr { + unsigned char h_dest[6]; + unsigned char h_source[6]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb_mac_header(skb); +} +# 68 "./include/linux/if_vlan.h" +extern void vlan_ioctl_set(int (*hook)(struct net *, void *)); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_vlan_dev(const struct net_device *dev) +{ + return dev->priv_flags & IFF_802_1Q_VLAN; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vlan_get_rx_ctag_filter_info(struct net_device *dev) +{ + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1312)); }); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/if_vlan.h", 83); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1313)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (83), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1314)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1315)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1316)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
vlan_drop_rx_ctag_filter_info(struct net_device *dev) +{ + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1317)); }); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/if_vlan.h", 89); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1318)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (89), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1319)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1320)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1321)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vlan_get_rx_stag_filter_info(struct net_device *dev) +{ + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1322)); }); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/if_vlan.h", 95); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1323)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (95), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1324)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1325)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1326)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev)); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void vlan_drop_rx_stag_filter_info(struct net_device *dev) +{ + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1327)); }); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/linux/if_vlan.h", 101); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1328)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (101), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1329)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1330)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1331)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev); +} +# 116 "./include/linux/if_vlan.h" +struct vlan_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 rx_errors; + u32 tx_dropped; +}; + + + +extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, + __be16 vlan_proto, u16 vlan_id); +extern int vlan_for_each(struct net_device *dev, + int (*action)(struct net_device *dev, int vid, + void *arg), void *arg); +extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); +extern u16 vlan_dev_vlan_id(const struct net_device *dev); +extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); + + + + + + + +struct vlan_priority_tci_mapping { + u32 priority; + u16 vlan_qos; + struct vlan_priority_tci_mapping *next; +}; + +struct proc_dir_entry; +struct netpoll; +# 167 "./include/linux/if_vlan.h" +struct vlan_dev_priv { + unsigned int nr_ingress_mappings; + u32 ingress_priority_map[8]; + unsigned int nr_egress_mappings; + struct vlan_priority_tci_mapping *egress_priority_map[16]; + + __be16 vlan_proto; + u16 vlan_id; + u16 flags; + + struct net_device *real_dev; + unsigned char real_dev_addr[6]; + + struct proc_dir_entry *dent; + struct vlan_pcpu_stats *vlan_pcpu_stats; + + struct netpoll *netpoll; + +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 +vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) +{ + struct vlan_priority_tci_mapping *mp; + + __asm__ __volatile__("": : :"memory"); + + mp = 
vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)]; + while (mp) { + if (mp->priority == skprio) { + return mp->vlan_qos; + + + } + mp = mp->next; + } + return 0; +} + +extern bool vlan_do_receive(struct sk_buff **skb); + +extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid); +extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid); + +extern int vlan_vids_add_by_dev(struct net_device *dev, + const struct net_device *by_dev); +extern void vlan_vids_del_by_dev(struct net_device *dev, + const struct net_device *by_dev); + +extern bool vlan_uses_dev(const struct net_device *dev); +# 300 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case (( __be16)(__u16)__builtin_bswap16((__u16)((0x8100)))): + case (( __be16)(__u16)__builtin_bswap16((__u16)((0x88A8)))): + return true; + default: + return false; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool vlan_hw_offload_capable(netdev_features_t features, + __be16 proto) +{ + if (proto == (( __be16)(__u16)__builtin_bswap16((__u16)((0x8100)))) && features & ((netdev_features_t)1 << (NETIF_F_HW_VLAN_CTAG_TX_BIT))) + return true; + if (proto == (( __be16)(__u16)__builtin_bswap16((__u16)((0x88A8)))) && features & ((netdev_features_t)1 << (NETIF_F_HW_VLAN_STAG_TX_BIT))) + return true; + return false; +} +# 333 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __vlan_insert_inner_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci, + unsigned int mac_len) +{ + struct vlan_ethhdr *veth; + + if (skb_cow_head(skb, 4) < 0) + return -12; + + skb_push(skb, 4); + + + if (__builtin_expect(!!(mac_len > 2), 1)) + memmove(skb->data, skb->data + 4, mac_len - 2); + skb->mac_header -= 4; + + veth = (struct vlan_ethhdr *)(skb->data + mac_len - 14); + + + if (__builtin_expect(!!(mac_len >= 2), 1)) { + + + + veth->h_vlan_proto = vlan_proto; + } else { + + + + veth->h_vlan_encapsulated_proto = skb->protocol; + } + + + veth->h_vlan_TCI = (( __be16)(__u16)__builtin_bswap16((__u16)((vlan_tci)))); + + return 0; +} +# 381 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, 14); +} +# 402 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci, + unsigned int mac_len) +{ + int err; + + err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); + if (err) { + dev_kfree_skb_any(skb); + return ((void *)0); + } + return skb; +} +# 431 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, 14); +} +# 449 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff 
*vlan_insert_tag_set_proto(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci) +{ + skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); + if (skb) + skb->protocol = vlan_proto; + return skb; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __vlan_hwaccel_clear_tag(struct sk_buff *skb) +{ + skb->vlan_present = 0; +} +# 477 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src) +{ + dst->vlan_present = src->vlan_present; + dst->vlan_proto = src->vlan_proto; + dst->vlan_tci = src->vlan_tci; +} +# 493 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) +{ + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + ((skb)->vlan_tci)); + if (__builtin_expect(!!(skb), 1)) + __vlan_hwaccel_clear_tag(skb); + return skb; +} +# 510 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __vlan_hwaccel_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + skb->vlan_proto = vlan_proto; + skb->vlan_tci = vlan_tci; + skb->vlan_present = 1; +} +# 525 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) +{ + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; + + if (!eth_type_vlan(veth->h_vlan_proto)) + return -22; + + *vlan_tci = (__u16)__builtin_bswap16((__u16)(( __u16)(__be16)(veth->h_vlan_TCI))); + return 0; +} +# 543 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __vlan_hwaccel_get_tag(const struct sk_buff *skb, + u16 *vlan_tci) +{ + if (((skb)->vlan_present)) { + *vlan_tci = ((skb)->vlan_tci); + return 0; + } else { + *vlan_tci = 0; + return -22; + } +} +# 562 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) +{ + if (skb->dev->features & ((netdev_features_t)1 << (NETIF_F_HW_VLAN_CTAG_TX_BIT))) { + return __vlan_hwaccel_get_tag(skb, vlan_tci); + } else { + return __vlan_get_tag(skb, vlan_tci); + } +} +# 580 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, + int *depth) +{ + unsigned int vlan_depth = skb->mac_len; + + + + + + if (eth_type_vlan(type)) { + if (vlan_depth) { + if (({ int __ret_warn_on = !!(vlan_depth < 4); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1332)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/if_vlan.h"), "i" (591), 
"i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1333)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1334)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) + return 0; + vlan_depth -= 4; + } else { + vlan_depth = 14; + } + do { + struct vlan_hdr *vh; + + if (__builtin_expect(!!(!pskb_may_pull(skb, vlan_depth + 4)), 0) + ) + return 0; + + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += 4; + } while (eth_type_vlan(type)); + } + + if (depth) + *depth = vlan_depth; + + return type; +} +# 623 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __be16 vlan_get_protocol(struct sk_buff *skb) +{ + return __vlan_get_protocol(skb, skb->protocol, ((void *)0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void vlan_set_encap_proto(struct sk_buff *skb, + struct vlan_hdr *vhdr) +{ + __be16 proto; + unsigned short *rawp; + + + + + + + proto = vhdr->h_vlan_encapsulated_proto; + if (eth_proto_is_802_3(proto)) { + skb->protocol = proto; + return; + } + + rawp = (unsigned short *)(vhdr + 1); + if (*rawp == 0xFFFF) + + + + + + + + skb->protocol = (( __be16)(__u16)__builtin_bswap16((__u16)((0x0001)))); + else + + + + skb->protocol = (( __be16)(__u16)__builtin_bswap16((__u16)((0x0004)))); +} +# 669 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_vlan_tagged(const struct sk_buff *skb) +{ + if (!((skb)->vlan_present) && + __builtin_expect(!!(!eth_type_vlan(skb->protocol)), 1)) + return false; + + return true; +} +# 685 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_vlan_tagged_multi(struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + + if (!((skb)->vlan_present)) { + struct vlan_ethhdr *veh; + + if (__builtin_expect(!!(!eth_type_vlan(protocol)), 1)) + return false; + + if (__builtin_expect(!!(!pskb_may_pull(skb, 18)), 0)) + return false; + + veh = (struct vlan_ethhdr *)skb->data; + protocol = veh->h_vlan_encapsulated_proto; + } + + if (!eth_type_vlan(protocol)) + return false; + + return true; +} +# 715 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) netdev_features_t vlan_features_check(struct sk_buff *skb, + netdev_features_t features) +{ + if (skb_vlan_tagged_multi(skb)) { + + + + + + features &= ((netdev_features_t)1 << (NETIF_F_SG_BIT)) | ((netdev_features_t)1 << (NETIF_F_HIGHDMA_BIT)) | ((netdev_features_t)1 << (NETIF_F_HW_CSUM_BIT)) | + ((netdev_features_t)1 << (NETIF_F_FRAGLIST_BIT)) | ((netdev_features_t)1 << (NETIF_F_HW_VLAN_CTAG_TX_BIT)) | + ((netdev_features_t)1 << (NETIF_F_HW_VLAN_STAG_TX_BIT)); + } + + return features; +} +# 741 "./include/linux/if_vlan.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long compare_vlan_header(const struct vlan_hdr *h1, + const struct vlan_hdr *h2) +{ + + return *(u32 *)h1 ^ *(u32 *)h2; + + + + + +} +# 22 "./include/linux/filter.h" 2 + +# 
1 "./include/crypto/sha.h" 1 +# 68 "./include/crypto/sha.h" +extern const u8 sha1_zero_message_hash[20]; + +extern const u8 sha224_zero_message_hash[28]; + +extern const u8 sha256_zero_message_hash[32]; + +extern const u8 sha384_zero_message_hash[48]; + +extern const u8 sha512_zero_message_hash[64]; + +struct sha1_state { + u32 state[20 / 4]; + u64 count; + u8 buffer[64]; +}; + +struct sha256_state { + u32 state[32 / 4]; + u64 count; + u8 buf[64]; +}; + +struct sha512_state { + u64 state[64 / 8]; + u64 count[2]; + u8 buf[128]; +}; + +struct shash_desc; + +extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); + +extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, + unsigned int len); + +extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *hash); +# 123 "./include/crypto/sha.h" +void sha1_init(__u32 *buf); +void sha1_transform(__u32 *digest, const char *data, __u32 *W); +# 136 "./include/crypto/sha.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sha256_init(struct sha256_state *sctx) +{ + sctx->state[0] = 0x6a09e667UL; + sctx->state[1] = 0xbb67ae85UL; + sctx->state[2] = 0x3c6ef372UL; + sctx->state[3] = 0xa54ff53aUL; + sctx->state[4] = 0x510e527fUL; + sctx->state[5] = 0x9b05688cUL; + sctx->state[6] = 0x1f83d9abUL; + sctx->state[7] = 0x5be0cd19UL; + sctx->count = 0; +} +void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha256_final(struct sha256_state *sctx, u8 *out); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sha224_init(struct sha256_state *sctx) +{ + sctx->state[0] = 0xc1059ed8UL; + sctx->state[1] = 0x367cd507UL; + sctx->state[2] = 0x3070dd17UL; + sctx->state[3] = 0xf70e5939UL; + sctx->state[4] = 0xffc00b31UL; + sctx->state[5] = 0x68581511UL; + sctx->state[6] = 0x64f98fa7UL; + sctx->state[7] = 0xbefa4fa4UL; + sctx->count = 0; +} +void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha224_final(struct sha256_state *sctx, u8 *out); +# 24 "./include/linux/filter.h" 2 + +# 1 "./include/net/sch_generic.h" 1 +# 9 "./include/net/sch_generic.h" +# 1 "./include/uapi/linux/pkt_cls.h" 1 +# 10 "./include/net/sch_generic.h" 2 +# 19 "./include/net/sch_generic.h" +# 1 "./include/net/gen_stats.h" 1 + + + + +# 1 "./include/uapi/linux/gen_stats.h" 1 + + + + + + +enum { + TCA_STATS_UNSPEC, + TCA_STATS_BASIC, + TCA_STATS_RATE_EST, + TCA_STATS_QUEUE, + TCA_STATS_APP, + TCA_STATS_RATE_EST64, + TCA_STATS_PAD, + TCA_STATS_BASIC_HW, + TCA_STATS_PKT64, + __TCA_STATS_MAX, +}; + + + + + + + +struct gnet_stats_basic { + __u64 bytes; + __u32 packets; +}; + + + + + + +struct gnet_stats_rate_est { + __u32 bps; + __u32 pps; +}; + + + + + + +struct gnet_stats_rate_est64 { + __u64 bps; + __u64 pps; +}; +# 59 "./include/uapi/linux/gen_stats.h" +struct gnet_stats_queue { + __u32 qlen; + __u32 backlog; + __u32 drops; + __u32 requeues; + __u32 overlimits; +}; + + + + + + +struct gnet_estimator { + signed char interval; + unsigned char ewma_log; +}; +# 6 "./include/net/gen_stats.h" 2 + + + + + +struct 
gnet_stats_basic_packed { + __u64 bytes; + __u64 packets; +}; + +struct gnet_stats_basic_cpu { + struct gnet_stats_basic_packed bstats; + struct u64_stats_sync syncp; +} __attribute__((__aligned__(2 * sizeof(u64)))); + +struct net_rate_estimator; + +struct gnet_dump { + spinlock_t * lock; + struct sk_buff * skb; + struct nlattr * tail; + + + int compat_tc_stats; + int compat_xstats; + int padattr; + void * xstats; + int xstats_len; + struct tc_stats tc_stats; +}; + +int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock, + struct gnet_dump *d, int padattr); + +int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, + int tc_stats_type, int xstats_type, + spinlock_t *lock, struct gnet_dump *d, + int padattr); + +int gnet_stats_copy_basic(const seqcount_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu *cpu, + struct gnet_stats_basic_packed *b); +void __gnet_stats_copy_basic(const seqcount_t *running, + struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu *cpu, + struct gnet_stats_basic_packed *b); +int gnet_stats_copy_basic_hw(const seqcount_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu *cpu, + struct gnet_stats_basic_packed *b); +int gnet_stats_copy_rate_est(struct gnet_dump *d, + struct net_rate_estimator **ptr); +int gnet_stats_copy_queue(struct gnet_dump *d, + struct gnet_stats_queue *cpu_q, + struct gnet_stats_queue *q, __u32 qlen); +void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, + const struct gnet_stats_queue *cpu_q, + const struct gnet_stats_queue *q, __u32 qlen); +int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); + +int gnet_stats_finish_copy(struct gnet_dump *d); + +int gen_new_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu *cpu_bstats, + struct net_rate_estimator **rate_est, + spinlock_t *lock, + seqcount_t *running, struct nlattr *opt); +void gen_kill_estimator(struct net_rate_estimator **ptr); +int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, + struct gnet_stats_basic_cpu *cpu_bstats, + struct net_rate_estimator **ptr, + spinlock_t *lock, + seqcount_t *running, struct nlattr *opt); +bool gen_estimator_active(struct net_rate_estimator **ptr); +bool gen_estimator_read(struct net_rate_estimator **ptr, + struct gnet_stats_rate_est64 *sample); +# 20 "./include/net/sch_generic.h" 2 +# 1 "./include/net/rtnetlink.h" 1 + + + + + +# 1 "./include/net/netlink.h" 1 +# 165 "./include/net/netlink.h" +enum { + NLA_UNSPEC, + NLA_U8, + NLA_U16, + NLA_U32, + NLA_U64, + NLA_STRING, + NLA_FLAG, + NLA_MSECS, + NLA_NESTED, + NLA_NESTED_ARRAY, + NLA_NUL_STRING, + NLA_BINARY, + NLA_S8, + NLA_S16, + NLA_S32, + NLA_S64, + NLA_BITFIELD32, + NLA_REJECT, + NLA_EXACT_LEN, + NLA_MIN_LEN, + __NLA_TYPE_MAX, +}; + + + +struct netlink_range_validation { + u64 min, max; +}; + +struct netlink_range_validation_signed { + s64 min, max; +}; + +enum nla_policy_validation { + NLA_VALIDATE_NONE, + NLA_VALIDATE_RANGE, + NLA_VALIDATE_MIN, + NLA_VALIDATE_MAX, + NLA_VALIDATE_RANGE_PTR, + NLA_VALIDATE_FUNCTION, + NLA_VALIDATE_WARN_TOO_LONG, +}; +# 316 "./include/net/netlink.h" +struct nla_policy { + u8 type; + u8 validation_type; + u16 len; + union { + const u32 bitfield32_valid; + const char *reject_message; + const struct nla_policy *nested_policy; + struct netlink_range_validation *range; + struct netlink_range_validation_signed *range_signed; + struct { + s16 min, max; + }; + int (*validate)(const struct nlattr *attr, + struct netlink_ext_ack *extack); +# 348 
"./include/net/netlink.h" + u16 strict_start_type; + }; +}; +# 438 "./include/net/netlink.h" +struct nl_info { + struct nlmsghdr *nlh; + struct net *nl_net; + u32 portid; + u8 skip_notify:1, + skip_notify_kernel:1; +}; +# 463 "./include/net/netlink.h" +enum netlink_validation { + NL_VALIDATE_LIBERAL = 0, + NL_VALIDATE_TRAILING = ((((1UL))) << (0)), + NL_VALIDATE_MAXTYPE = ((((1UL))) << (1)), + NL_VALIDATE_UNSPEC = ((((1UL))) << (2)), + NL_VALIDATE_STRICT_ATTRS = ((((1UL))) << (3)), + NL_VALIDATE_NESTED = ((((1UL))) << (4)), +}; +# 480 "./include/net/netlink.h" +int netlink_rcv_skb(struct sk_buff *skb, + int (*cb)(struct sk_buff *, struct nlmsghdr *, + struct netlink_ext_ack *)); +int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, + unsigned int group, int report, gfp_t flags); + +int __nla_validate(const struct nlattr *head, int len, int maxtype, + const struct nla_policy *policy, unsigned int validate, + struct netlink_ext_ack *extack); +int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, + int len, const struct nla_policy *policy, unsigned int validate, + struct netlink_ext_ack *extack); +int nla_policy_len(const struct nla_policy *, int); +struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype); +size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize); +char *nla_strdup(const struct nlattr *nla, gfp_t flags); +int nla_memcpy(void *dest, const struct nlattr *src, int count); +int nla_memcmp(const struct nlattr *nla, const void *data, size_t size); +int nla_strcmp(const struct nlattr *nla, const char *str); +struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen); +struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype, + int attrlen, int padattr); +void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen); +struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen); +struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, + int attrlen, int padattr); +void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen); +void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, + const void *data); +void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, + const void *data, int padattr); +void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data); +int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data); +int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen, + const void *data, int padattr); +int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data); +int nla_append(struct sk_buff *skb, int attrlen, const void *data); +# 526 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_msg_size(int payload) +{ + return ((int) ( ((sizeof(struct nlmsghdr))+4U -1) & ~(4U -1) )) + payload; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_total_size(int payload) +{ + return ( ((nlmsg_msg_size(payload))+4U -1) & ~(4U -1) ); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_padlen(int payload) +{ + return nlmsg_total_size(payload) - nlmsg_msg_size(payload); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *nlmsg_data(const struct nlmsghdr *nlh) +{ + 
return (unsigned char *) nlh + ((int) ( ((sizeof(struct nlmsghdr))+4U -1) & ~(4U -1) )); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_len(const struct nlmsghdr *nlh) +{ + return nlh->nlmsg_len - ((int) ( ((sizeof(struct nlmsghdr))+4U -1) & ~(4U -1) )); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh, + int hdrlen) +{ + unsigned char *data = nlmsg_data(nlh); + return (struct nlattr *) (data + ( ((hdrlen)+4U -1) & ~(4U -1) )); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen) +{ + return nlmsg_len(nlh) - ( ((hdrlen)+4U -1) & ~(4U -1) ); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_ok(const struct nlmsghdr *nlh, int remaining) +{ + return (remaining >= (int) sizeof(struct nlmsghdr) && + nlh->nlmsg_len >= sizeof(struct nlmsghdr) && + nlh->nlmsg_len <= remaining); +} +# 609 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlmsghdr * +nlmsg_next(const struct nlmsghdr *nlh, int *remaining) +{ + int totlen = ( ((nlh->nlmsg_len)+4U -1) & ~(4U -1) ); + + *remaining -= totlen; + + return (struct nlmsghdr *) ((unsigned char *) nlh + totlen); +} +# 635 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_parse(struct nlattr **tb, int maxtype, + const struct nlattr *head, int len, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_parse(tb, maxtype, head, len, policy, + (NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), extack); +} +# 660 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_parse_deprecated(struct nlattr **tb, int maxtype, + const struct nlattr *head, int len, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_parse(tb, maxtype, head, len, policy, + NL_VALIDATE_LIBERAL, extack); +} +# 685 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype, + const struct nlattr *head, + int len, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_parse(tb, maxtype, head, len, policy, + (NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE), extack); +} +# 707 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr *tb[], int maxtype, + const struct nla_policy *policy, + unsigned int validate, + struct netlink_ext_ack *extack) +{ + if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) { + do { static const char __msg[] = "Invalid header length"; struct netlink_ext_ack *__extack = (extack); if (__extack) __extack->_msg = __msg; } while (0); + return -22; + } + + return __nla_parse(tb, maxtype, 
nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), policy, validate, + extack); +} +# 734 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr *tb[], int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy, + (NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), extack); +} +# 753 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_parse_deprecated(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr *tb[], int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy, + NL_VALIDATE_LIBERAL, extack); +} +# 772 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr *tb[], int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy, + (NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE), extack); +} +# 790 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh, + int hdrlen, int attrtype) +{ + return nla_find(nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), attrtype); +} +# 812 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_validate_deprecated(const struct nlattr *head, int len, + int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_LIBERAL, + extack); +} +# 836 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_validate(const struct nlattr *head, int len, int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_validate(head, len, maxtype, policy, (NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), + extack); +} +# 852 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_validate_deprecated(const struct nlmsghdr *nlh, + int hdrlen, int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) + return -22; + + return __nla_validate(nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), maxtype, + policy, NL_VALIDATE_LIBERAL, extack); +} +# 873 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_report(const struct nlmsghdr *nlh) +{ + return !!(nlh->nlmsg_flags & 0x08); +} +# 901 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlmsghdr 
*nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, + int type, int payload, int flags) +{ + if (__builtin_expect(!!(skb_tailroom(skb) < nlmsg_total_size(payload)), 0)) + return ((void *)0); + + return __nlmsg_put(skb, portid, seq, type, payload, flags); +} +# 921 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb, + struct netlink_callback *cb, + int type, int payload, + int flags) +{ + return nlmsg_put(skb, (*(struct netlink_skb_parms*)&((cb->skb)->cb)).portid, cb->nlh->nlmsg_seq, + type, payload, flags); +} +# 938 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *nlmsg_new(size_t payload, gfp_t flags) +{ + return alloc_skb(nlmsg_total_size(payload), flags); +} +# 952 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *nlmsg_get_pos(struct sk_buff *skb) +{ + return skb_tail_pointer(skb); +} +# 975 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nlmsg_trim(struct sk_buff *skb, const void *mark) +{ + if (mark) { + ({ int __ret_warn_on = !!((unsigned char *) mark < skb->data); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1335)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/netlink.h"), "i" (978), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1336)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1337)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + skb_trim(skb, (unsigned char *) mark - skb->data); + } +} +# 991 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh) +{ + nlmsg_trim(skb, nlh); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nlmsg_free(struct sk_buff *skb) +{ + kfree_skb(skb); +} +# 1013 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_multicast(struct sock *sk, struct sk_buff *skb, + u32 portid, unsigned int group, gfp_t flags) +{ + int err; + + (*(struct netlink_skb_parms*)&((skb)->cb)).dst_group = group; + + err = netlink_broadcast(sk, skb, portid, group, flags); + if (err > 0) + err = 0; + + return err; +} + + + + + + + +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid) +{ + int err; + + err = netlink_unicast(sk, skb, portid, 0x40); + if (err > 0) + err = 0; + + return err; +} +# 1071 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +nl_dump_check_consistent(struct netlink_callback *cb, + struct nlmsghdr *nlh) +{ + if (cb->prev_seq && cb->seq != cb->prev_seq) + nlh->nlmsg_flags |= 0x10; + cb->prev_seq = cb->seq; +} +# 1088 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_attr_size(int payload) +{ + return ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1))) + payload; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_total_size(int payload) +{ + return (((nla_attr_size(payload)) + 4 - 1) & ~(4 - 1)); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_padlen(int payload) +{ + return nla_total_size(payload) - nla_attr_size(payload); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_type(const struct nlattr *nla) +{ + return nla->nla_type & ~((1 << 15) | (1 << 14)); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *nla_data(const struct nlattr *nla) +{ + return (char *) nla + ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1))); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_len(const struct nlattr *nla) +{ + return nla->nla_len - ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1))); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_ok(const struct nlattr *nla, int remaining) +{ + return remaining >= (int) sizeof(*nla) && + nla->nla_len >= sizeof(*nla) && + nla->nla_len <= remaining; +} +# 1158 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlattr *nla_next(const struct nlattr *nla, int *remaining) +{ + unsigned int totlen = (((nla->nla_len) + 4 - 1) & ~(4 - 1)); + + *remaining -= totlen; + return (struct nlattr *) ((char *) nla + totlen); +} +# 1173 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlattr * +nla_find_nested(const struct nlattr *nla, int attrtype) +{ + return nla_find(nla_data(nla), nla_len(nla), attrtype); +} +# 1189 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_parse_nested(struct nlattr *tb[], int maxtype, + const struct nlattr *nla, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + if (!(nla->nla_type & (1 << 15))) { + do { static const char __msg[] = "NLA_F_NESTED is missing"; struct netlink_ext_ack *__extack = (extack); if (__extack) { __extack->_msg = __msg; __extack->bad_attr = (nla); } } while (0); + return -22; + } + + return 
__nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy, + (NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), extack); +} +# 1213 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_parse_nested_deprecated(struct nlattr *tb[], int maxtype, + const struct nlattr *nla, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy, + NL_VALIDATE_LIBERAL, extack); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) +{ + + u8 tmp = value; + + return nla_put(skb, attrtype, sizeof(u8), &tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) +{ + u16 tmp = value; + + return nla_put(skb, attrtype, sizeof(u16), &tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) +{ + __be16 tmp = value; + + return nla_put(skb, attrtype, sizeof(__be16), &tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) +{ + __be16 tmp = value; + + return nla_put_be16(skb, attrtype | (1 << 14), tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) +{ + __le16 tmp = value; + + return nla_put(skb, attrtype, sizeof(__le16), &tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) +{ + u32 tmp = value; + + return nla_put(skb, attrtype, sizeof(u32), &tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) +{ + __be32 tmp = value; + + return nla_put(skb, attrtype, sizeof(__be32), &tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) +{ + __be32 tmp = value; + + return nla_put_be32(skb, attrtype | (1 << 14), tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) +{ + __le32 tmp = value; + + return nla_put(skb, attrtype, sizeof(__le32), &tmp); +} +# 1347 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, + u64 value, int padattr) +{ + u64 tmp = value; + + return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr); +} +# 1362 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, + int padattr) +{ + __be64 tmp = value; + + return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr); +} +# 1377 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, + int padattr) +{ + __be64 tmp = value; + + return nla_put_be64(skb, attrtype | (1 << 14), tmp, + padattr); +} +# 1393 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, + int padattr) +{ + __le64 tmp = value; + + return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) +{ + s8 tmp = value; + + return nla_put(skb, attrtype, sizeof(s8), &tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) +{ + s16 tmp = value; + + return nla_put(skb, attrtype, sizeof(s16), &tmp); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) +{ + s32 tmp = value; + + return nla_put(skb, attrtype, sizeof(s32), &tmp); +} +# 1447 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, + int padattr) +{ + s64 tmp = value; + + return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_string(struct sk_buff *skb, int attrtype, + const char *str) +{ + return nla_put(skb, attrtype, strlen(str) + 1, str); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_flag(struct sk_buff *skb, int attrtype) +{ + return nla_put(skb, attrtype, 0, ((void *)0)); +} +# 1484 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_msecs(struct sk_buff *skb, int attrtype, + unsigned long njiffies, int padattr) +{ + u64 tmp = jiffies_to_msecs(njiffies); + + return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr); +} +# 1499 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_in_addr(struct sk_buff *skb, int attrtype, + __be32 addr) +{ + __be32 tmp = addr; + + return nla_put_be32(skb, attrtype, tmp); +} +# 1514 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_put_in6_addr(struct sk_buff *skb, int attrtype, + const struct in6_addr *addr) +{ + return nla_put(skb, attrtype, sizeof(*addr), addr); +} +# 1527 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) int nla_put_bitfield32(struct sk_buff *skb, int attrtype, + __u32 value, __u32 selector) +{ + struct nla_bitfield32 tmp = { value, selector, }; + + return nla_put(skb, attrtype, sizeof(tmp), &tmp); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 nla_get_u32(const struct nlattr *nla) +{ + return *(u32 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __be32 nla_get_be32(const struct nlattr *nla) +{ + return *(__be32 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __le32 nla_get_le32(const struct nlattr *nla) +{ + return *(__le32 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 nla_get_u16(const struct nlattr *nla) +{ + return *(u16 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __be16 nla_get_be16(const struct nlattr *nla) +{ + return *(__be16 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __le16 nla_get_le16(const struct nlattr *nla) +{ + return *(__le16 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u8 nla_get_u8(const struct nlattr *nla) +{ + return *(u8 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 nla_get_u64(const struct nlattr *nla) +{ + u64 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __be64 nla_get_be64(const struct nlattr *nla) +{ + __be64 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __le64 nla_get_le64(const struct nlattr *nla) +{ + return *(__le64 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s32 nla_get_s32(const struct nlattr *nla) +{ + return *(s32 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s16 nla_get_s16(const struct nlattr *nla) +{ + return *(s16 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s8 nla_get_s8(const struct nlattr *nla) +{ + return *(s8 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) s64 nla_get_s64(const struct nlattr *nla) +{ + s64 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + + return tmp; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_get_flag(const struct nlattr *nla) +{ + return !!nla; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) unsigned long nla_get_msecs(const struct nlattr *nla) +{ + u64 msecs = nla_get_u64(nla); + + return msecs_to_jiffies((unsigned long) msecs); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __be32 nla_get_in_addr(const struct nlattr *nla) +{ + return *(__be32 *) nla_data(nla); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct in6_addr nla_get_in6_addr(const struct nlattr *nla) +{ + struct in6_addr tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + return tmp; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla) +{ + struct nla_bitfield32 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + return tmp; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *nla_memdup(const struct nlattr *src, gfp_t gfp) +{ + return kmemdup(nla_data(src), nla_len(src), gfp); +} +# 1749 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlattr *nla_nest_start_noflag(struct sk_buff *skb, + int attrtype) +{ + struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb); + + if (nla_put(skb, attrtype, 0, ((void *)0)) < 0) + return ((void *)0); + + return start; +} +# 1770 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype) +{ + return nla_nest_start_noflag(skb, attrtype | (1 << 15)); +} +# 1785 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_nest_end(struct sk_buff *skb, struct nlattr *start) +{ + start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start; + return skb->len; +} +# 1799 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) +{ + nlmsg_trim(skb, start); +} +# 1818 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __nla_validate_nested(const struct nlattr *start, int maxtype, + const struct nla_policy *policy, + unsigned int validate, + struct netlink_ext_ack *extack) +{ + return __nla_validate(nla_data(start), nla_len(start), maxtype, policy, + validate, extack); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +nla_validate_nested(const struct nlattr *start, int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_validate_nested(start, maxtype, policy, + (NL_VALIDATE_TRAILING | NL_VALIDATE_MAXTYPE | NL_VALIDATE_UNSPEC | NL_VALIDATE_STRICT_ATTRS | NL_VALIDATE_NESTED), extack); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +nla_validate_nested_deprecated(const struct nlattr *start, int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + return __nla_validate_nested(start, 
maxtype, policy, + NL_VALIDATE_LIBERAL, extack); +} +# 1852 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool nla_need_padding_for_64bit(struct sk_buff *skb) +{ +# 1863 "./include/net/netlink.h" + return false; +} +# 1878 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_align_64bit(struct sk_buff *skb, int padattr) +{ + if (nla_need_padding_for_64bit(skb) && + !nla_reserve(skb, padattr, 0)) + return -90; + + return 0; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int nla_total_size_64bit(int payload) +{ + return (((nla_attr_size(payload)) + 4 - 1) & ~(4 - 1)) + + + + ; +} +# 1926 "./include/net/netlink.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool nla_is_last(const struct nlattr *nla, int rem) +{ + return nla->nla_len == rem; +} + +void nla_get_range_unsigned(const struct nla_policy *pt, + struct netlink_range_validation *range); +void nla_get_range_signed(const struct nla_policy *pt, + struct netlink_range_validation_signed *range); + +int netlink_policy_dump_start(const struct nla_policy *policy, + unsigned int maxtype, + unsigned long *state); +bool netlink_policy_dump_loop(unsigned long *state); +int netlink_policy_dump_write(struct sk_buff *skb, unsigned long state); +# 7 "./include/net/rtnetlink.h" 2 + +typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, + struct netlink_ext_ack *); +typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); + +enum rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, +}; + +void rtnl_register(int protocol, int msgtype, + rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); +int rtnl_register_module(struct module *owner, int protocol, int msgtype, + rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); +int rtnl_unregister(int protocol, int msgtype); +void rtnl_unregister_all(int protocol); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rtnl_msg_family(const struct nlmsghdr *nlh) +{ + if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg)) + return ((struct rtgenmsg *) nlmsg_data(nlh))->rtgen_family; + else + return 0; +} +# 59 "./include/net/rtnetlink.h" +struct rtnl_link_ops { + struct list_head list; + + const char *kind; + + size_t priv_size; + void (*setup)(struct net_device *dev); + + unsigned int maxtype; + const struct nla_policy *policy; + int (*validate)(struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack); + + int (*newlink)(struct net *src_net, + struct net_device *dev, + struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack); + int (*changelink)(struct net_device *dev, + struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack); + void (*dellink)(struct net_device *dev, + struct list_head *head); + + size_t (*get_size)(const struct net_device *dev); + int (*fill_info)(struct sk_buff *skb, + const struct net_device *dev); + + size_t (*get_xstats_size)(const struct net_device *dev); + int (*fill_xstats)(struct sk_buff *skb, + const struct net_device *dev); + unsigned int (*get_num_tx_queues)(void); + unsigned int (*get_num_rx_queues)(void); + + unsigned int slave_maxtype; + const struct nla_policy *slave_policy; + int 
(*slave_changelink)(struct net_device *dev, + struct net_device *slave_dev, + struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack); + size_t (*get_slave_size)(const struct net_device *dev, + const struct net_device *slave_dev); + int (*fill_slave_info)(struct sk_buff *skb, + const struct net_device *dev, + const struct net_device *slave_dev); + struct net *(*get_link_net)(const struct net_device *dev); + size_t (*get_linkxstats_size)(const struct net_device *dev, + int attr); + int (*fill_linkxstats)(struct sk_buff *skb, + const struct net_device *dev, + int *prividx, int attr); +}; + +int __rtnl_link_register(struct rtnl_link_ops *ops); +void __rtnl_link_unregister(struct rtnl_link_ops *ops); + +int rtnl_link_register(struct rtnl_link_ops *ops); +void rtnl_link_unregister(struct rtnl_link_ops *ops); +# 135 "./include/net/rtnetlink.h" +struct rtnl_af_ops { + struct list_head list; + int family; + + int (*fill_link_af)(struct sk_buff *skb, + const struct net_device *dev, + u32 ext_filter_mask); + size_t (*get_link_af_size)(const struct net_device *dev, + u32 ext_filter_mask); + + int (*validate_link_af)(const struct net_device *dev, + const struct nlattr *attr); + int (*set_link_af)(struct net_device *dev, + const struct nlattr *attr); + + int (*fill_stats_af)(struct sk_buff *skb, + const struct net_device *dev); + size_t (*get_stats_af_size)(const struct net_device *dev); +}; + +void rtnl_af_register(struct rtnl_af_ops *ops); +void rtnl_af_unregister(struct rtnl_af_ops *ops); + +struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); +struct net_device *rtnl_create_link(struct net *net, const char *ifname, + unsigned char name_assign_type, + const struct rtnl_link_ops *ops, + struct nlattr *tb[], + struct netlink_ext_ack *extack); +int rtnl_delete_link(struct net_device *dev); +int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); + +int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, + struct netlink_ext_ack *exterr); +struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); +# 21 "./include/net/sch_generic.h" 2 +# 1 "./include/net/flow_offload.h" 1 + + + + + + + +# 1 "./include/linux/rhashtable.h" 1 +# 23 "./include/linux/rhashtable.h" +# 1 "./include/linux/jhash.h" 1 +# 27 "./include/linux/jhash.h" +# 1 "./include/linux/unaligned/packed_struct.h" 1 + + + + + +struct __una_u16 { u16 x; } __attribute__((__packed__)); +struct __una_u32 { u32 x; } __attribute__((__packed__)); +struct __una_u64 { u64 x; } __attribute__((__packed__)); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 __get_unaligned_cpu16(const void *p) +{ + const struct __una_u16 *ptr = (const struct __una_u16 *)p; + return ptr->x; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __get_unaligned_cpu32(const void *p) +{ + const struct __una_u32 *ptr = (const struct __una_u32 *)p; + return ptr->x; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 __get_unaligned_cpu64(const void *p) +{ + const struct __una_u64 *ptr = (const struct __una_u64 *)p; + return ptr->x; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __put_unaligned_cpu16(u16 val, void *p) +{ + struct __una_u16 *ptr = (struct __una_u16 *)p; + ptr->x = val; +} + +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __put_unaligned_cpu32(u32 val, void *p) +{ + struct __una_u32 *ptr = (struct __una_u32 *)p; + ptr->x = val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __put_unaligned_cpu64(u64 val, void *p) +{ + struct __una_u64 *ptr = (struct __una_u64 *)p; + ptr->x = val; +} +# 28 "./include/linux/jhash.h" 2 +# 70 "./include/linux/jhash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const u8 *k = key; + + + a = b = c = 0xdeadbeef + length + initval; + + + while (length > 12) { + a += __get_unaligned_cpu32(k); + b += __get_unaligned_cpu32(k + 4); + c += __get_unaligned_cpu32(k + 8); + { a -= c; a ^= rol32(c, 4); c += b; b -= a; b ^= rol32(a, 6); a += c; c -= b; c ^= rol32(b, 8); b += a; a -= c; a ^= rol32(c, 16); c += b; b -= a; b ^= rol32(a, 19); a += c; c -= b; c ^= rol32(b, 4); b += a; }; + length -= 12; + k += 12; + } + + switch (length) { + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; + case 1: a += k[0]; + { c ^= b; c -= rol32(b, 14); a ^= c; a -= rol32(c, 11); b ^= a; b -= rol32(a, 25); c ^= b; c -= rol32(b, 16); a ^= c; a -= rol32(c, 4); b ^= a; b -= rol32(a, 14); c ^= b; c -= rol32(b, 24); }; + case 0: + break; + } + + return c; +} +# 116 "./include/linux/jhash.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 jhash2(const u32 *k, u32 length, u32 initval) +{ + u32 a, b, c; + + + a = b = c = 0xdeadbeef + (length<<2) + initval; + + + while (length > 3) { + a += k[0]; + b += k[1]; + c += k[2]; + { a -= c; a ^= rol32(c, 4); c += b; b -= a; b ^= rol32(a, 6); a += c; c -= b; c ^= rol32(b, 8); b += a; a -= c; a ^= rol32(c, 16); c += b; b -= a; b ^= rol32(a, 19); a += c; c -= b; c ^= rol32(b, 4); b += a; }; + length -= 3; + k += 3; + } + + + switch (length) { + case 3: c += k[2]; + case 2: b += k[1]; + case 1: a += k[0]; + { c ^= b; c -= rol32(b, 14); a ^= c; a -= rol32(c, 11); b ^= a; b -= rol32(a, 25); c ^= b; c -= rol32(b, 16); a ^= c; a -= rol32(c, 4); b ^= a; b -= rol32(a, 14); c ^= b; c -= rol32(b, 24); }; + case 0: + break; + } + + return c; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) +{ + a += initval; + b += initval; + c += initval; + + { c ^= b; c -= rol32(b, 14); a ^= c; a -= rol32(c, 11); b ^= a; b -= rol32(a, 25); c ^= b; c -= rol32(b, 16); a ^= c; a -= rol32(c, 4); b ^= a; b -= rol32(a, 14); c ^= b; c -= rol32(b, 24); }; + + return c; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) +{ + return __jhash_nwords(a, b, c, initval + 0xdeadbeef + (3 << 2)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 jhash_2words(u32 a, u32 b, u32 initval) +{ + return __jhash_nwords(a, b, 0, initval + 0xdeadbeef + (2 << 
2)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 jhash_1word(u32 a, u32 initval) +{ + return __jhash_nwords(a, 0, 0, initval + 0xdeadbeef + (1 << 2)); +} +# 24 "./include/linux/rhashtable.h" 2 +# 47 "./include/linux/rhashtable.h" +struct rhash_lock_head {}; +# 76 "./include/linux/rhashtable.h" +struct bucket_table { + unsigned int size; + unsigned int nest; + u32 hash_rnd; + struct list_head walkers; + struct callback_head rcu; + + struct bucket_table *future_tbl; + + struct lockdep_map dep_map; + + struct rhash_lock_head *buckets[] __attribute__((__aligned__((1 << (6))))); +}; +# 108 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rht_is_a_nulls(const struct rhash_head *ptr) +{ + return ((unsigned long) ptr & 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *rht_obj(const struct rhashtable *ht, + const struct rhash_head *he) +{ + return (char *)he - ht->p.head_offset; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int rht_bucket_index(const struct bucket_table *tbl, + unsigned int hash) +{ + return hash & (tbl->size - 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int rht_key_get_hash(struct rhashtable *ht, + const void *key, const struct rhashtable_params params, + unsigned int hash_rnd) +{ + unsigned int hash; + + + if (!__builtin_constant_p(params.key_len)) + hash = ht->p.hashfn(key, ht->key_len, hash_rnd); + else if (params.key_len) { + unsigned int key_len = params.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, hash_rnd); + else if (key_len & (sizeof(u32) - 1)) + hash = jhash(key, key_len, hash_rnd); + else + hash = jhash2(key, key_len / sizeof(u32), hash_rnd); + } else { + unsigned int key_len = ht->p.key_len; + + if (params.hashfn) + hash = params.hashfn(key, key_len, hash_rnd); + else + hash = jhash(key, key_len, hash_rnd); + } + + return hash; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int rht_key_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const void *key, const struct rhashtable_params params) +{ + unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); + + return rht_bucket_index(tbl, hash); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int rht_head_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const struct rhash_head *he, const struct rhashtable_params params) +{ + const char *ptr = rht_obj(ht, he); + + return __builtin_expect(!!(params.obj_hashfn), 1) ? 
+ rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: + ht->p.key_len, + tbl->hash_rnd)) : + rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rht_grow_above_75(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + + return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && + (!ht->p.max_size || tbl->size < ht->p.max_size); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rht_shrink_below_30(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + + return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && + tbl->size > ht->p.min_size; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rht_grow_above_100(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + return atomic_read(&ht->nelems) > tbl->size && + (!ht->p.max_size || tbl->size < ht->p.max_size); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool rht_grow_above_max(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + return atomic_read(&ht->nelems) >= ht->max_elems; +} + + +int lockdep_rht_mutex_is_held(struct rhashtable *ht); +int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); +# 242 "./include/linux/rhashtable.h" +void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, + struct rhash_head *obj); + +void rhashtable_walk_enter(struct rhashtable *ht, + struct rhashtable_iter *iter); +void rhashtable_walk_exit(struct rhashtable_iter *iter); +int rhashtable_walk_start_check(struct rhashtable_iter *iter) ; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rhashtable_walk_start(struct rhashtable_iter *iter) +{ + (void)rhashtable_walk_start_check(iter); +} + +void *rhashtable_walk_next(struct rhashtable_iter *iter); +void *rhashtable_walk_peek(struct rhashtable_iter *iter); +void rhashtable_walk_stop(struct rhashtable_iter *iter) ; + +void rhashtable_free_and_destroy(struct rhashtable *ht, + void (*free_fn)(void *ptr, void *arg), + void *arg); +void rhashtable_destroy(struct rhashtable *ht); + +struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, + unsigned int hash); +struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, + unsigned int hash); +struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, + struct bucket_table *tbl, + unsigned int hash); +# 287 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhash_lock_head *const *rht_bucket( + const struct bucket_table *tbl, unsigned int hash) +{ + return __builtin_expect(!!(tbl->nest), 0) ? rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhash_lock_head **rht_bucket_var( + struct bucket_table *tbl, unsigned int hash) +{ + return __builtin_expect(!!(tbl->nest), 0) ? 
__rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhash_lock_head **rht_bucket_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) +{ + return __builtin_expect(!!(tbl->nest), 0) ? rht_bucket_nested_insert(ht, tbl, hash) : + &tbl->buckets[hash]; +} +# 327 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rht_lock(struct bucket_table *tbl, + struct rhash_lock_head **bkt) +{ + local_bh_disable(); + bit_spin_lock(0, (unsigned long *)bkt); + lock_acquire(&tbl->dep_map, 0, 0, 0, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; })); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rht_lock_nested(struct bucket_table *tbl, + struct rhash_lock_head **bucket, + unsigned int subclass) +{ + local_bh_disable(); + bit_spin_lock(0, (unsigned long *)bucket); + lock_acquire(&tbl->dep_map, subclass, 0, 0, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; })); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rht_unlock(struct bucket_table *tbl, + struct rhash_lock_head **bkt) +{ + lock_release(&tbl->dep_map, ({ __label__ __here; __here: (unsigned long)&&__here; })); + bit_spin_unlock(0, (unsigned long *)bkt); + local_bh_enable(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhash_head *__rht_ptr( + struct rhash_lock_head *const *bkt) +{ + return (struct rhash_head *) + ((unsigned long)*bkt & ~((((1UL))) << (0)) ?: + (unsigned long)((void *)(1UL | (((long)((unsigned long) (bkt)) >> 1) << 1)))); +} +# 367 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhash_head *rht_ptr_rcu( + struct rhash_lock_head *const *bkt) +{ + struct rhash_head *p = __rht_ptr(bkt); + + return ({ typeof(*(p)) *________p1 = (typeof(*(p)) *)({ do { extern void __compiletime_assert_1338(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((p)) == sizeof(char) || sizeof((p)) == sizeof(short) || sizeof((p)) == sizeof(int) || sizeof((p)) == sizeof(long)) || sizeof((p)) == sizeof(long long))) __compiletime_assert_1338(); } while (0); ({ typeof( _Generic(((p)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((p)))) __x = (*(const volatile typeof( _Generic(((p)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((p)))) *)&((p))); do { } while (0); (typeof((p)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if 
(debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 372, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(p)) *)(________p1)); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhash_head *rht_ptr( + struct rhash_lock_head *const *bkt, + struct bucket_table *tbl, + unsigned int hash) +{ + return ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 380, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(__rht_ptr(bkt))) *)((__rht_ptr(bkt)))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhash_head *rht_ptr_exclusive( + struct rhash_lock_head *const *bkt) +{ + return ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 386, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(__rht_ptr(bkt))) *)((__rht_ptr(bkt)))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rht_assign_locked(struct rhash_lock_head **bkt, + struct rhash_head *obj) +{ + struct rhash_head **p = (struct rhash_head **)bkt; + + if (rht_is_a_nulls(obj)) + obj = ((void *)0); + do { uintptr_t _r_a_p__v = (uintptr_t)((void *)((unsigned long)obj | ((((1UL))) << (0)))); ; if (__builtin_constant_p((void *)((unsigned long)obj | ((((1UL))) << (0)))) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_1339(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((*p)) == sizeof(char) || sizeof((*p)) == sizeof(short) || sizeof((*p)) == sizeof(int) || sizeof((*p)) == sizeof(long)) || sizeof((*p)) == sizeof(long long))) __compiletime_assert_1339(); } while (0); do { *(volatile typeof((*p)) *)&((*p)) = ((typeof(*p))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_1340(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&*p) == sizeof(char) || sizeof(*&*p) == sizeof(short) || sizeof(*&*p) == sizeof(int) || sizeof(*&*p) == sizeof(long)))) __compiletime_assert_1340(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1341(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&*p) == sizeof(char) || sizeof(*&*p) == sizeof(short) || sizeof(*&*p) == sizeof(int) || sizeof(*&*p) == sizeof(long)) || sizeof(*&*p) == sizeof(long long))) __compiletime_assert_1341(); } while (0); do { *(volatile typeof(*&*p) *)&(*&*p) = ((typeof(*((typeof(*p))_r_a_p__v)) *)((typeof(*p))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rht_assign_unlock(struct bucket_table *tbl, + struct rhash_lock_head **bkt, + struct rhash_head *obj) +{ + struct rhash_head **p = (struct rhash_head **)bkt; + + if (rht_is_a_nulls(obj)) + obj = 
((void *)0); + lock_release(&tbl->dep_map, ({ __label__ __here; __here: (unsigned long)&&__here; })); + do { uintptr_t _r_a_p__v = (uintptr_t)(obj); ; if (__builtin_constant_p(obj) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_1342(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((*p)) == sizeof(char) || sizeof((*p)) == sizeof(short) || sizeof((*p)) == sizeof(int) || sizeof((*p)) == sizeof(long)) || sizeof((*p)) == sizeof(long long))) __compiletime_assert_1342(); } while (0); do { *(volatile typeof((*p)) *)&((*p)) = ((typeof(*p))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_1343(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&*p) == sizeof(char) || sizeof(*&*p) == sizeof(short) || sizeof(*&*p) == sizeof(int) || sizeof(*&*p) == sizeof(long)))) __compiletime_assert_1343(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1344(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&*p) == sizeof(char) || sizeof(*&*p) == sizeof(short) || sizeof(*&*p) == sizeof(int) || sizeof(*&*p) == sizeof(long)) || sizeof(*&*p) == sizeof(long long))) __compiletime_assert_1344(); } while (0); do { *(volatile typeof(*&*p) *)&(*&*p) = ((typeof(*((typeof(*p))_r_a_p__v)) *)((typeof(*p))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); + (void)0; + local_bh_enable(); +} +# 578 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhashtable_compare(struct rhashtable_compare_arg *arg, + const void *obj) +{ + struct rhashtable *ht = arg->ht; + const char *ptr = obj; + + return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhash_head *__rhashtable_lookup( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + struct rhash_lock_head *const *bkt; + struct bucket_table *tbl; + struct rhash_head *he; + unsigned int hash; + + tbl = ({ typeof(*(ht->tbl)) *________p1 = (typeof(*(ht->tbl)) *)({ do { extern void __compiletime_assert_1345(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((ht->tbl)) == sizeof(char) || sizeof((ht->tbl)) == sizeof(short) || sizeof((ht->tbl)) == sizeof(int) || sizeof((ht->tbl)) == sizeof(long)) || sizeof((ht->tbl)) == sizeof(long long))) __compiletime_assert_1345(); } while (0); ({ typeof( _Generic(((ht->tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((ht->tbl)))) __x = (*(const volatile typeof( _Generic(((ht->tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: 
(unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((ht->tbl)))) *)&((ht->tbl))); do { } while (0); (typeof((ht->tbl)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_mutex_is_held(ht)) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 601, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(ht->tbl)) *)(________p1)); }); +restart: + hash = rht_key_hashfn(ht, tbl, key, params); + bkt = rht_bucket(tbl, hash); + do { + for (({__asm__ __volatile__("": : :"memory"); }), he = rht_ptr_rcu(bkt); !rht_is_a_nulls(he); he = ({ typeof(he->next) ________p1 = ({ do { extern void __compiletime_assert_1346(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(he->next) == sizeof(char) || sizeof(he->next) == sizeof(short) || sizeof(he->next) == sizeof(int) || sizeof(he->next) == sizeof(long)) || sizeof(he->next) == sizeof(long long))) __compiletime_assert_1346(); } while (0); ({ typeof( _Generic((he->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (he->next))) __x = (*(const volatile typeof( _Generic((he->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (he->next))) *)&(he->next)); do { } while (0); (typeof(he->next))__x; }); }); ((typeof(*he->next) *)(________p1)); })) { + if (params.obj_cmpfn ? 
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) : + rhashtable_compare(&arg, rht_obj(ht, he))) + continue; + return he; + } + + + + } while (he != ((void *)(1UL | (((long)((unsigned long) (bkt)) >> 1) << 1)))); + + + __asm__ __volatile__("": : :"memory"); + + tbl = ({ typeof(*(tbl->future_tbl)) *________p1 = (typeof(*(tbl->future_tbl)) *)({ do { extern void __compiletime_assert_1347(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((tbl->future_tbl)) == sizeof(char) || sizeof((tbl->future_tbl)) == sizeof(short) || sizeof((tbl->future_tbl)) == sizeof(int) || sizeof((tbl->future_tbl)) == sizeof(long)) || sizeof((tbl->future_tbl)) == sizeof(long long))) __compiletime_assert_1347(); } while (0); ({ typeof( _Generic(((tbl->future_tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->future_tbl)))) __x = (*(const volatile typeof( _Generic(((tbl->future_tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->future_tbl)))) *)&((tbl->future_tbl))); do { } while (0); (typeof((tbl->future_tbl)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_mutex_is_held(ht)) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 621, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(tbl->future_tbl)) *)(________p1)); }); + if (__builtin_expect(!!(tbl), 0)) + goto restart; + + return ((void *)0); +} +# 641 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *rhashtable_lookup( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhash_head *he = __rhashtable_lookup(ht, key, params); + + return he ? rht_obj(ht, he) : ((void *)0); +} +# 664 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *rhashtable_lookup_fast( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + void *obj; + + rcu_read_lock(); + obj = rhashtable_lookup(ht, key, params); + rcu_read_unlock(); + + return obj; +} +# 691 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct rhlist_head *rhltable_lookup( + struct rhltable *hlt, const void *key, + const struct rhashtable_params params) +{ + struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params); + + return he ? 
({ void *__mptr = (void *)(he); do { extern void __compiletime_assert_1348(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(he)), typeof(((struct rhlist_head *)0)->rhead)) && !__builtin_types_compatible_p(typeof(*(he)), typeof(void))))) __compiletime_assert_1348(); } while (0); ((struct rhlist_head *)(__mptr - __builtin_offsetof(struct rhlist_head, rhead))); }) : ((void *)0); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *__rhashtable_insert_fast( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params, bool rhlist) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + struct rhash_lock_head **bkt; + struct rhash_head **pprev; + struct bucket_table *tbl; + struct rhash_head *head; + unsigned int hash; + int elasticity; + void *data; + + rcu_read_lock(); + + tbl = ({ typeof(*(ht->tbl)) *________p1 = (typeof(*(ht->tbl)) *)({ do { extern void __compiletime_assert_1349(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((ht->tbl)) == sizeof(char) || sizeof((ht->tbl)) == sizeof(short) || sizeof((ht->tbl)) == sizeof(int) || sizeof((ht->tbl)) == sizeof(long)) || sizeof((ht->tbl)) == sizeof(long long))) __compiletime_assert_1349(); } while (0); ({ typeof( _Generic(((ht->tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((ht->tbl)))) __x = (*(const volatile typeof( _Generic(((ht->tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((ht->tbl)))) *)&((ht->tbl))); do { } while (0); (typeof((ht->tbl)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_mutex_is_held(ht)) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 722, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(ht->tbl)) *)(________p1)); }); + hash = rht_head_hashfn(ht, tbl, obj, params); + elasticity = 16u; + bkt = rht_bucket_insert(ht, tbl, hash); + data = ERR_PTR(-12); + if (!bkt) + goto out; + pprev = ((void *)0); + rht_lock(tbl, bkt); + + if (__builtin_expect(!!(({ typeof(*(tbl->future_tbl)) *_________p1 = (typeof(*(tbl->future_tbl)) *)({ do { extern void __compiletime_assert_1350(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((tbl->future_tbl)) == sizeof(char) || sizeof((tbl->future_tbl)) == sizeof(short) || sizeof((tbl->future_tbl)) == sizeof(int) || sizeof((tbl->future_tbl)) == sizeof(long)) || sizeof((tbl->future_tbl)) == sizeof(long long))) __compiletime_assert_1350(); } while (0); ({ typeof( _Generic(((tbl->future_tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned 
short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->future_tbl)))) __x = (*(const volatile typeof( _Generic(((tbl->future_tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->future_tbl)))) *)&((tbl->future_tbl))); do { } while (0); (typeof((tbl->future_tbl)))__x; }); }); ; ((typeof(*(tbl->future_tbl)) *)(_________p1)); })), 0)) { +slow_path: + rht_unlock(tbl, bkt); + rcu_read_unlock(); + return rhashtable_insert_slow(ht, key, obj); + } + + for (head = rht_ptr(bkt, tbl, hash); !rht_is_a_nulls(head); head = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 739, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*((head)->next)) *)(((head)->next))); })) { + struct rhlist_head *plist; + struct rhlist_head *list; + + elasticity--; + if (!key || + (params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, head)) : + rhashtable_compare(&arg, rht_obj(ht, head)))) { + pprev = &head->next; + continue; + } + + data = rht_obj(ht, head); + + if (!rhlist) + goto out_unlock; + + + list = ({ void *__mptr = (void *)(obj); do { extern void __compiletime_assert_1351(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(obj)), typeof(((struct rhlist_head *)0)->rhead)) && !__builtin_types_compatible_p(typeof(*(obj)), typeof(void))))) __compiletime_assert_1351(); } while (0); ((struct rhlist_head *)(__mptr - __builtin_offsetof(struct rhlist_head, rhead))); }); + plist = ({ void *__mptr = (void *)(head); do { extern void __compiletime_assert_1352(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(head)), typeof(((struct rhlist_head *)0)->rhead)) && !__builtin_types_compatible_p(typeof(*(head)), typeof(void))))) __compiletime_assert_1352(); } while (0); ((struct rhlist_head *)(__mptr - __builtin_offsetof(struct rhlist_head, rhead))); }); + + do { ; do { do { extern void __compiletime_assert_1353(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list->next) == sizeof(char) || sizeof(list->next) == sizeof(short) || sizeof(list->next) == sizeof(int) || sizeof(list->next) == sizeof(long)) || sizeof(list->next) == sizeof(long long))) __compiletime_assert_1353(); } while (0); do { *(volatile typeof(list->next) *)&(list->next) = ((typeof(*(plist)) *)(plist)); } while (0); } while (0); } while (0); + head = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 762, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(head->next)) *)((head->next))); }); + do { ; do { do { extern void 
__compiletime_assert_1354(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list->rhead.next) == sizeof(char) || sizeof(list->rhead.next) == sizeof(short) || sizeof(list->rhead.next) == sizeof(int) || sizeof(list->rhead.next) == sizeof(long)) || sizeof(list->rhead.next) == sizeof(long long))) __compiletime_assert_1354(); } while (0); do { *(volatile typeof(list->rhead.next) *)&(list->rhead.next) = ((typeof(*(head)) *)(head)); } while (0); } while (0); } while (0); + if (pprev) { + do { uintptr_t _r_a_p__v = (uintptr_t)(obj); ; if (__builtin_constant_p(obj) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_1355(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((*pprev)) == sizeof(char) || sizeof((*pprev)) == sizeof(short) || sizeof((*pprev)) == sizeof(int) || sizeof((*pprev)) == sizeof(long)) || sizeof((*pprev)) == sizeof(long long))) __compiletime_assert_1355(); } while (0); do { *(volatile typeof((*pprev)) *)&((*pprev)) = ((typeof(*pprev))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_1356(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&*pprev) == sizeof(char) || sizeof(*&*pprev) == sizeof(short) || sizeof(*&*pprev) == sizeof(int) || sizeof(*&*pprev) == sizeof(long)))) __compiletime_assert_1356(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1357(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&*pprev) == sizeof(char) || sizeof(*&*pprev) == sizeof(short) || sizeof(*&*pprev) == sizeof(int) || sizeof(*&*pprev) == sizeof(long)) || sizeof(*&*pprev) == sizeof(long long))) __compiletime_assert_1357(); } while (0); do { *(volatile typeof(*&*pprev) *)&(*&*pprev) = ((typeof(*((typeof(*pprev))_r_a_p__v)) *)((typeof(*pprev))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + rht_unlock(tbl, bkt); + } else + rht_assign_unlock(tbl, bkt, obj); + data = ((void *)0); + goto out; + } + + if (elasticity <= 0) + goto slow_path; + + data = ERR_PTR(-7); + if (__builtin_expect(!!(rht_grow_above_max(ht, tbl)), 0)) + goto out_unlock; + + if (__builtin_expect(!!(rht_grow_above_100(ht, tbl)), 0)) + goto slow_path; + + + head = rht_ptr(bkt, tbl, hash); + + do { ; do { do { extern void __compiletime_assert_1358(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(obj->next) == sizeof(char) || sizeof(obj->next) == sizeof(short) || sizeof(obj->next) == sizeof(int) || sizeof(obj->next) == sizeof(long)) || sizeof(obj->next) == sizeof(long long))) __compiletime_assert_1358(); } while (0); do { *(volatile typeof(obj->next) *)&(obj->next) = ((typeof(*(head)) *)(head)); } while (0); } while (0); } while (0); + if (rhlist) { + struct rhlist_head *list; + + list = ({ void *__mptr = (void *)(obj); do { extern void __compiletime_assert_1359(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(obj)), typeof(((struct rhlist_head *)0)->rhead)) && !__builtin_types_compatible_p(typeof(*(obj)), typeof(void))))) __compiletime_assert_1359(); } while (0); ((struct rhlist_head *)(__mptr - __builtin_offsetof(struct rhlist_head, rhead))); }); + do { ; do { do { extern void __compiletime_assert_1360(void) __attribute__((__error__("Unsupported access size for 
{READ,WRITE}_ONCE()."))); if (!((sizeof(list->next) == sizeof(char) || sizeof(list->next) == sizeof(short) || sizeof(list->next) == sizeof(int) || sizeof(list->next) == sizeof(long)) || sizeof(list->next) == sizeof(long long))) __compiletime_assert_1360(); } while (0); do { *(volatile typeof(list->next) *)&(list->next) = ((typeof(*(((void *)0))) *)(((void *)0))); } while (0); } while (0); } while (0); + } + + atomic_inc(&ht->nelems); + rht_assign_unlock(tbl, bkt, obj); + + if (rht_grow_above_75(ht, tbl)) + schedule_work(&ht->run_work); + + data = ((void *)0); +out: + rcu_read_unlock(); + + return data; + +out_unlock: + rht_unlock(tbl, bkt); + goto out; +} +# 826 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhashtable_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + void *ret; + + ret = __rhashtable_insert_fast(ht, ((void *)0), obj, params, false); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == ((void *)0) ? 0 : -17; +} +# 855 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhltable_insert_key( + struct rhltable *hlt, const void *key, struct rhlist_head *list, + const struct rhashtable_params params) +{ + return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead, + params, true)); +} +# 878 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhltable_insert( + struct rhltable *hlt, struct rhlist_head *list, + const struct rhashtable_params params) +{ + const char *key = rht_obj(&hlt->ht, &list->rhead); + + key += params.key_offset; + + return rhltable_insert_key(hlt, key, list, params); +} +# 903 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhashtable_lookup_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + const char *key = rht_obj(ht, obj); + void *ret; + + do { if (__builtin_expect(!!(ht->p.obj_hashfn), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1361)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/rhashtable.h"), "i" (910), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1362)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, + false); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == ((void *)0) ? 
0 : -17; +} +# 930 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *rhashtable_lookup_get_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + const char *key = rht_obj(ht, obj); + + do { if (__builtin_expect(!!(ht->p.obj_hashfn), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1363)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/rhashtable.h"), "i" (936), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1364)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, + false); +} +# 957 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhashtable_lookup_insert_key( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + void *ret; + + do { if (__builtin_expect(!!(!ht->p.obj_hashfn || !key), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1365)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/rhashtable.h"), "i" (963), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1366)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + ret = __rhashtable_insert_fast(ht, key, obj, params, false); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == ((void *)0) ? 
0 : -17; +} +# 983 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *rhashtable_lookup_get_insert_key( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + do { if (__builtin_expect(!!(!ht->p.obj_hashfn || !key), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1367)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/rhashtable.h"), "i" (987), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1368)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + return __rhashtable_insert_fast(ht, key, obj, params, false); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __rhashtable_remove_fast_one( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj, const struct rhashtable_params params, + bool rhlist) +{ + struct rhash_lock_head **bkt; + struct rhash_head **pprev; + struct rhash_head *he; + unsigned int hash; + int err = -2; + + hash = rht_head_hashfn(ht, tbl, obj, params); + bkt = rht_bucket_var(tbl, hash); + if (!bkt) + return -2; + pprev = ((void *)0); + rht_lock(tbl, bkt); + + for (he = rht_ptr(bkt, tbl, hash); !rht_is_a_nulls(he); he = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1011, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*((he)->next)) *)(((he)->next))); })) { + struct rhlist_head *list; + + list = ({ void *__mptr = (void *)(he); do { extern void __compiletime_assert_1369(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(he)), typeof(((struct rhlist_head *)0)->rhead)) && !__builtin_types_compatible_p(typeof(*(he)), typeof(void))))) __compiletime_assert_1369(); } while (0); ((struct rhlist_head *)(__mptr - __builtin_offsetof(struct rhlist_head, rhead))); }); + + if (he != obj) { + struct rhlist_head **lpprev; + + pprev = &he->next; + + if (!rhlist) + continue; + + do { + lpprev = &list->next; + list = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1026, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(list->next)) *)((list->next))); }) + ; + } while (list && obj != &list->rhead); + + if (!list) + continue; + + list = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1033, "suspicious rcu_dereference_protected() 
usage"); } } while (0); ; ((typeof(*(list->next)) *)((list->next))); }); + do { ; do { do { extern void __compiletime_assert_1370(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*lpprev) == sizeof(char) || sizeof(*lpprev) == sizeof(short) || sizeof(*lpprev) == sizeof(int) || sizeof(*lpprev) == sizeof(long)) || sizeof(*lpprev) == sizeof(long long))) __compiletime_assert_1370(); } while (0); do { *(volatile typeof(*lpprev) *)&(*lpprev) = ((typeof(*(list)) *)(list)); } while (0); } while (0); } while (0); + err = 0; + break; + } + + obj = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1039, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(obj->next)) *)((obj->next))); }); + err = 1; + + if (rhlist) { + list = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1043, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(list->next)) *)((list->next))); }); + if (list) { + do { ; do { do { extern void __compiletime_assert_1371(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list->rhead.next) == sizeof(char) || sizeof(list->rhead.next) == sizeof(short) || sizeof(list->rhead.next) == sizeof(int) || sizeof(list->rhead.next) == sizeof(long)) || sizeof(list->rhead.next) == sizeof(long long))) __compiletime_assert_1371(); } while (0); do { *(volatile typeof(list->rhead.next) *)&(list->rhead.next) = ((typeof(*(obj)) *)(obj)); } while (0); } while (0); } while (0); + obj = &list->rhead; + err = 0; + } + } + + if (pprev) { + do { uintptr_t _r_a_p__v = (uintptr_t)(obj); ; if (__builtin_constant_p(obj) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_1372(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((*pprev)) == sizeof(char) || sizeof((*pprev)) == sizeof(short) || sizeof((*pprev)) == sizeof(int) || sizeof((*pprev)) == sizeof(long)) || sizeof((*pprev)) == sizeof(long long))) __compiletime_assert_1372(); } while (0); do { *(volatile typeof((*pprev)) *)&((*pprev)) = ((typeof(*pprev))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_1373(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&*pprev) == sizeof(char) || sizeof(*&*pprev) == sizeof(short) || sizeof(*&*pprev) == sizeof(int) || sizeof(*&*pprev) == sizeof(long)))) __compiletime_assert_1373(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1374(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&*pprev) == sizeof(char) || sizeof(*&*pprev) == sizeof(short) || sizeof(*&*pprev) == sizeof(int) || sizeof(*&*pprev) == sizeof(long)) || sizeof(*&*pprev) == sizeof(long long))) __compiletime_assert_1374(); } while (0); do { *(volatile typeof(*&*pprev) *)&(*&*pprev) = ((typeof(*((typeof(*pprev))_r_a_p__v)) *)((typeof(*pprev))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + rht_unlock(tbl, bkt); + } else { + rht_assign_unlock(tbl, bkt, obj); + 
} + goto unlocked; + } + + rht_unlock(tbl, bkt); +unlocked: + if (err > 0) { + atomic_dec(&ht->nelems); + if (__builtin_expect(!!(ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)), 0) + ) + schedule_work(&ht->run_work); + err = 0; + } + + return err; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __rhashtable_remove_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params, bool rhlist) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = ({ typeof(*(ht->tbl)) *________p1 = (typeof(*(ht->tbl)) *)({ do { extern void __compiletime_assert_1375(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((ht->tbl)) == sizeof(char) || sizeof((ht->tbl)) == sizeof(short) || sizeof((ht->tbl)) == sizeof(int) || sizeof((ht->tbl)) == sizeof(long)) || sizeof((ht->tbl)) == sizeof(long long))) __compiletime_assert_1375(); } while (0); ({ typeof( _Generic(((ht->tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((ht->tbl)))) __x = (*(const volatile typeof( _Generic(((ht->tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((ht->tbl)))) *)&((ht->tbl))); do { } while (0); (typeof((ht->tbl)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_mutex_is_held(ht)) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1083, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(ht->tbl)) *)(________p1)); }); + + + + + + + while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params, + rhlist)) && + (tbl = ({ typeof(*(tbl->future_tbl)) *________p1 = (typeof(*(tbl->future_tbl)) *)({ do { extern void __compiletime_assert_1376(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((tbl->future_tbl)) == sizeof(char) || sizeof((tbl->future_tbl)) == sizeof(short) || sizeof((tbl->future_tbl)) == sizeof(int) || sizeof((tbl->future_tbl)) == sizeof(long)) || sizeof((tbl->future_tbl)) == sizeof(long long))) __compiletime_assert_1376(); } while (0); ({ typeof( _Generic(((tbl->future_tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->future_tbl)))) __x = (*(const volatile typeof( _Generic(((tbl->future_tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: 
(unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->future_tbl)))) *)&((tbl->future_tbl))); do { } while (0); (typeof((tbl->future_tbl)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_mutex_is_held(ht)) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1092, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(tbl->future_tbl)) *)(________p1)); }))) + ; + + rcu_read_unlock(); + + return err; +} +# 1115 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhashtable_remove_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + return __rhashtable_remove_fast(ht, obj, params, false); +} +# 1137 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhltable_remove( + struct rhltable *hlt, struct rhlist_head *list, + const struct rhashtable_params params) +{ + return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __rhashtable_replace_fast( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj_old, struct rhash_head *obj_new, + const struct rhashtable_params params) +{ + struct rhash_lock_head **bkt; + struct rhash_head **pprev; + struct rhash_head *he; + unsigned int hash; + int err = -2; + + + + + hash = rht_head_hashfn(ht, tbl, obj_old, params); + if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) + return -22; + + bkt = rht_bucket_var(tbl, hash); + if (!bkt) + return -2; + + pprev = ((void *)0); + rht_lock(tbl, bkt); + + for (he = rht_ptr(bkt, tbl, hash); !rht_is_a_nulls(he); he = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_bucket_is_held(tbl, hash))))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1170, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*((he)->next)) *)(((he)->next))); })) { + if (he != obj_old) { + pprev = &he->next; + continue; + } + + do { uintptr_t _r_a_p__v = (uintptr_t)(obj_old->next); ; if (__builtin_constant_p(obj_old->next) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_1377(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((obj_new->next)) == sizeof(char) || sizeof((obj_new->next)) == sizeof(short) || sizeof((obj_new->next)) == sizeof(int) || sizeof((obj_new->next)) == sizeof(long)) || sizeof((obj_new->next)) == sizeof(long long))) __compiletime_assert_1377(); } while (0); do { *(volatile typeof((obj_new->next)) *)&((obj_new->next)) = ((typeof(obj_new->next))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_1378(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&obj_new->next) == sizeof(char) || sizeof(*&obj_new->next) == sizeof(short) || sizeof(*&obj_new->next) == sizeof(int) || sizeof(*&obj_new->next) == sizeof(long)))) __compiletime_assert_1378(); } while (0); __asm__ 
__volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1379(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&obj_new->next) == sizeof(char) || sizeof(*&obj_new->next) == sizeof(short) || sizeof(*&obj_new->next) == sizeof(int) || sizeof(*&obj_new->next) == sizeof(long)) || sizeof(*&obj_new->next) == sizeof(long long))) __compiletime_assert_1379(); } while (0); do { *(volatile typeof(*&obj_new->next) *)&(*&obj_new->next) = ((typeof(*((typeof(obj_new->next))_r_a_p__v)) *)((typeof(obj_new->next))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + if (pprev) { + do { uintptr_t _r_a_p__v = (uintptr_t)(obj_new); ; if (__builtin_constant_p(obj_new) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_1380(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((*pprev)) == sizeof(char) || sizeof((*pprev)) == sizeof(short) || sizeof((*pprev)) == sizeof(int) || sizeof((*pprev)) == sizeof(long)) || sizeof((*pprev)) == sizeof(long long))) __compiletime_assert_1380(); } while (0); do { *(volatile typeof((*pprev)) *)&((*pprev)) = ((typeof(*pprev))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_1381(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&*pprev) == sizeof(char) || sizeof(*&*pprev) == sizeof(short) || sizeof(*&*pprev) == sizeof(int) || sizeof(*&*pprev) == sizeof(long)))) __compiletime_assert_1381(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1382(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&*pprev) == sizeof(char) || sizeof(*&*pprev) == sizeof(short) || sizeof(*&*pprev) == sizeof(int) || sizeof(*&*pprev) == sizeof(long)) || sizeof(*&*pprev) == sizeof(long long))) __compiletime_assert_1382(); } while (0); do { *(volatile typeof(*&*pprev) *)&(*&*pprev) = ((typeof(*((typeof(*pprev))_r_a_p__v)) *)((typeof(*pprev))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + rht_unlock(tbl, bkt); + } else { + rht_assign_unlock(tbl, bkt, obj_new); + } + err = 0; + goto unlocked; + } + + rht_unlock(tbl, bkt); + +unlocked: + return err; +} +# 1207 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int rhashtable_replace_fast( + struct rhashtable *ht, struct rhash_head *obj_old, + struct rhash_head *obj_new, + const struct rhashtable_params params) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = ({ typeof(*(ht->tbl)) *________p1 = (typeof(*(ht->tbl)) *)({ do { extern void __compiletime_assert_1383(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((ht->tbl)) == sizeof(char) || sizeof((ht->tbl)) == sizeof(short) || sizeof((ht->tbl)) == sizeof(int) || sizeof((ht->tbl)) == sizeof(long)) || sizeof((ht->tbl)) == sizeof(long long))) __compiletime_assert_1383(); } while (0); ({ typeof( _Generic(((ht->tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((ht->tbl)))) __x = 
(*(const volatile typeof( _Generic(((ht->tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((ht->tbl)))) *)&((ht->tbl))); do { } while (0); (typeof((ht->tbl)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_mutex_is_held(ht)) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1217, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(ht->tbl)) *)(________p1)); }); + + + + + + + while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, + obj_new, params)) && + (tbl = ({ typeof(*(tbl->future_tbl)) *________p1 = (typeof(*(tbl->future_tbl)) *)({ do { extern void __compiletime_assert_1384(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((tbl->future_tbl)) == sizeof(char) || sizeof((tbl->future_tbl)) == sizeof(short) || sizeof((tbl->future_tbl)) == sizeof(int) || sizeof((tbl->future_tbl)) == sizeof(long)) || sizeof((tbl->future_tbl)) == sizeof(long long))) __compiletime_assert_1384(); } while (0); ({ typeof( _Generic(((tbl->future_tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->future_tbl)))) __x = (*(const volatile typeof( _Generic(((tbl->future_tbl)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->future_tbl)))) *)&((tbl->future_tbl))); do { } while (0); (typeof((tbl->future_tbl)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rht_mutex_is_held(ht)) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/linux/rhashtable.h", 1226, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(tbl->future_tbl)) *)(________p1)); }))) + ; + + rcu_read_unlock(); + + return err; +} +# 1255 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rhltable_walk_enter(struct rhltable *hlt, + struct rhashtable_iter *iter) +{ + return rhashtable_walk_enter(&hlt->ht, iter); +} +# 1269 "./include/linux/rhashtable.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rhltable_free_and_destroy(struct rhltable *hlt, + void (*free_fn)(void *ptr, + void *arg), + void *arg) +{ + return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
rhltable_destroy(struct rhltable *hlt) +{ + return rhltable_free_and_destroy(hlt, ((void *)0), ((void *)0)); +} +# 9 "./include/net/flow_offload.h" 2 + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_meta { + struct flow_dissector_key_meta *key, *mask; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key, *mask; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key, *mask; +}; + +struct flow_match_mpls { + struct flow_dissector_key_mpls *key, *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; + +struct flow_match_enc_opts { + struct flow_dissector_key_enc_opts *key, *mask; +}; + +struct flow_match_ct { + struct flow_dissector_key_ct *key, *mask; +}; + +struct flow_rule; + +void flow_rule_match_meta(const struct flow_rule *rule, + struct flow_match_meta *out); +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +void flow_rule_match_cvlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_ip(const struct flow_rule *rule, + struct flow_match_ip *out); +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_tcp(const struct flow_rule *rule, + struct flow_match_tcp *out); +void flow_rule_match_icmp(const struct flow_rule *rule, + struct flow_match_icmp *out); +void flow_rule_match_mpls(const struct flow_rule *rule, + struct flow_match_mpls *out); +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_enc_ip(const struct flow_rule *rule, + struct flow_match_ip *out); +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +void flow_rule_match_enc_opts(const struct flow_rule *rule, + struct flow_match_enc_opts *out); +void flow_rule_match_ct(const struct flow_rule *rule, + struct flow_match_ct *out); + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP, + FLOW_ACTION_TRAP, + 
FLOW_ACTION_GOTO, + FLOW_ACTION_REDIRECT, + FLOW_ACTION_MIRRED, + FLOW_ACTION_REDIRECT_INGRESS, + FLOW_ACTION_MIRRED_INGRESS, + FLOW_ACTION_VLAN_PUSH, + FLOW_ACTION_VLAN_POP, + FLOW_ACTION_VLAN_MANGLE, + FLOW_ACTION_TUNNEL_ENCAP, + FLOW_ACTION_TUNNEL_DECAP, + FLOW_ACTION_MANGLE, + FLOW_ACTION_ADD, + FLOW_ACTION_CSUM, + FLOW_ACTION_MARK, + FLOW_ACTION_PTYPE, + FLOW_ACTION_PRIORITY, + FLOW_ACTION_WAKE, + FLOW_ACTION_QUEUE, + FLOW_ACTION_SAMPLE, + FLOW_ACTION_POLICE, + FLOW_ACTION_CT, + FLOW_ACTION_CT_METADATA, + FLOW_ACTION_MPLS_PUSH, + FLOW_ACTION_MPLS_POP, + FLOW_ACTION_MPLS_MANGLE, + FLOW_ACTION_GATE, + NUM_FLOW_ACTIONS, +}; + + + + + +enum flow_action_mangle_base { + FLOW_ACT_MANGLE_UNSPEC = 0, + FLOW_ACT_MANGLE_HDR_TYPE_ETH, + FLOW_ACT_MANGLE_HDR_TYPE_IP4, + FLOW_ACT_MANGLE_HDR_TYPE_IP6, + FLOW_ACT_MANGLE_HDR_TYPE_TCP, + FLOW_ACT_MANGLE_HDR_TYPE_UDP, +}; + +enum flow_action_hw_stats_bit { + FLOW_ACTION_HW_STATS_IMMEDIATE_BIT, + FLOW_ACTION_HW_STATS_DELAYED_BIT, + FLOW_ACTION_HW_STATS_DISABLED_BIT, + + FLOW_ACTION_HW_STATS_NUM_BITS +}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_IMMEDIATE = + ((((1UL))) << (FLOW_ACTION_HW_STATS_IMMEDIATE_BIT)), + FLOW_ACTION_HW_STATS_DELAYED = ((((1UL))) << (FLOW_ACTION_HW_STATS_DELAYED_BIT)), + FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE | + FLOW_ACTION_HW_STATS_DELAYED, + FLOW_ACTION_HW_STATS_DISABLED = + ((((1UL))) << (FLOW_ACTION_HW_STATS_DISABLED_BIT)), + FLOW_ACTION_HW_STATS_DONT_CARE = ((((1UL))) << (FLOW_ACTION_HW_STATS_NUM_BITS)) - 1, +}; + +typedef void (*action_destr)(void *priv); + +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[]; +}; + +struct flow_action_cookie *flow_action_cookie_create(void *data, + unsigned int len, + gfp_t gfp); +void flow_action_cookie_destroy(struct flow_action_cookie *cookie); + +struct flow_action_entry { + enum flow_action_id id; + enum flow_action_hw_stats hw_stats; + action_destr destructor; + void *destructor_priv; + union { + u32 chain_index; + struct net_device *dev; + struct { + u16 vid; + __be16 proto; + u8 prio; + } vlan; + struct { + + enum flow_action_mangle_base htype; + u32 offset; + u32 mask; + u32 val; + } mangle; + struct ip_tunnel_info *tunnel; + u32 csum_flags; + u32 mark; + u16 ptype; + u32 priority; + struct { + u32 ctx; + u32 index; + u8 vf; + } queue; + struct { + struct psample_group *psample_group; + u32 rate; + u32 trunc_size; + bool truncate; + } sample; + struct { + s64 burst; + u64 rate_bytes_ps; + } police; + struct { + int action; + u16 zone; + struct nf_flowtable *flow_table; + } ct; + struct { + unsigned long cookie; + u32 mark; + u32 labels[4]; + } ct_metadata; + struct { + u32 label; + __be16 proto; + u8 tc; + u8 bos; + u8 ttl; + } mpls_push; + struct { + __be16 proto; + } mpls_pop; + struct { + u32 label; + u8 tc; + u8 bos; + u8 ttl; + } mpls_mangle; + struct { + u32 index; + s32 prio; + u64 basetime; + u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; + }; + struct flow_action_cookie *cookie; +}; + +struct flow_action { + unsigned int num_entries; + struct flow_action_entry entries[]; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool flow_action_has_entries(const struct flow_action *action) +{ + return action->num_entries; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool flow_offload_has_one_action(const struct flow_action *action) +{ 
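+ /* True only when the action table holds a single entry; the
+  * mixed-HW-stats consistency walk below short-circuits on this case. */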
+ return action->num_entries == 1; +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +flow_action_mixed_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack) +{ + const struct flow_action_entry *action_entry; + u8 last_hw_stats = last_hw_stats; + int i; + + if (flow_offload_has_one_action(action)) + return true; + + for (i = 0, action_entry = &(action)->entries[0]; i < (action)->num_entries; action_entry = &(action)->entries[++i]) { + if (i && action_entry->hw_stats != last_hw_stats) { + do { static const char __msg[] = "io_uring" ": " "Mixing HW stats types for actions is not supported"; struct netlink_ext_ack *__extack = ((extack)); if (__extack) __extack->_msg = __msg; } while (0); + return false; + } + last_hw_stats = action_entry->hw_stats; + } + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct flow_action_entry * +flow_action_first_entry_get(const struct flow_action *action) +{ + ({ int __ret_warn_on = !!(!flow_action_has_entries(action)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1385)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/flow_offload.h"), "i" (327), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1386)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1387)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + return &action->entries[0]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +__flow_action_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack, + bool check_allow_bit, + enum flow_action_hw_stats_bit allow_bit) +{ + const struct flow_action_entry *action_entry; + + if (!flow_action_has_entries(action)) + return true; + if (!flow_action_mixed_hw_stats_check(action, extack)) + return false; + + action_entry = flow_action_first_entry_get(action); + + + ({ int __ret_warn_on = !!(!action_entry->hw_stats); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1388)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/flow_offload.h"), "i" (347), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1389)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : 
"i" (1390)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + + if (!check_allow_bit && + ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) { + do { static const char __msg[] = "io_uring" ": " "Driver supports only default HW stats type \"any\""; struct netlink_ext_ack *__extack = ((extack)); if (__extack) __extack->_msg = __msg; } while (0); + return false; + } else if (check_allow_bit && + !(action_entry->hw_stats & ((((1UL))) << (allow_bit)))) { + do { static const char __msg[] = "io_uring" ": " "Driver does not support selected HW stats type"; struct netlink_ext_ack *__extack = ((extack)); if (__extack) __extack->_msg = __msg; } while (0); + return false; + } + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +flow_action_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack, + enum flow_action_hw_stats_bit allow_bit) +{ + return __flow_action_hw_stats_check(action, extack, true, allow_bit); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +flow_action_basic_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack) +{ + return __flow_action_hw_stats_check(action, extack, false, 0); +} + +struct flow_rule { + struct flow_match match; + struct flow_action action; +}; + +struct flow_rule *flow_rule_alloc(unsigned int num_actions); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} + +struct flow_stats { + u64 pkts; + u64 bytes; + u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flow_stats_update(struct flow_stats *flow_stats, + u64 bytes, u64 pkts, u64 lastused, + enum flow_action_hw_stats used_hw_stats) +{ + flow_stats->pkts += pkts; + flow_stats->bytes += bytes; + flow_stats->lastused = __builtin_choose_expr(((!!(sizeof((typeof((u64)(flow_stats->lastused)) *)1 == (typeof((u64)(lastused)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(flow_stats->lastused)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(lastused)) * 0l)) : (int *)8))))), (((u64)(flow_stats->lastused)) > ((u64)(lastused)) ? ((u64)(flow_stats->lastused)) : ((u64)(lastused))), ({ typeof((u64)(flow_stats->lastused)) __UNIQUE_ID___x1391 = ((u64)(flow_stats->lastused)); typeof((u64)(lastused)) __UNIQUE_ID___y1392 = ((u64)(lastused)); ((__UNIQUE_ID___x1391) > (__UNIQUE_ID___y1392) ? 
(__UNIQUE_ID___x1391) : (__UNIQUE_ID___y1392)); })); + + + + + ({ int __ret_warn_on = !!(used_hw_stats == FLOW_ACTION_HW_STATS_ANY); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1393)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/flow_offload.h"), "i" (408), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1394)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1395)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + flow_stats->used_hw_stats |= used_hw_stats; + flow_stats->used_hw_stats_valid = true; +} + +enum flow_block_command { + FLOW_BLOCK_BIND, + FLOW_BLOCK_UNBIND, +}; + +enum flow_block_binder_type { + FLOW_BLOCK_BINDER_TYPE_UNSPEC, + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, +}; + +struct flow_block { + struct list_head cb_list; +}; + +struct netlink_ext_ack; + +struct flow_block_offload { + enum flow_block_command command; + enum flow_block_binder_type binder_type; + bool block_shared; + bool unlocked_driver_cb; + struct net *net; + struct flow_block *block; + struct list_head cb_list; + struct list_head *driver_block_list; + struct netlink_ext_ack *extack; +}; + +enum tc_setup_type; +typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data, + void *cb_priv); + +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + enum flow_block_binder_type binder_type; + void *data; + void (*cleanup)(struct flow_block_cb *block_cb); +}; + +struct flow_block_cb { + struct list_head driver_list; + struct list_head list; + flow_setup_cb_t *cb; + void *cb_ident; + void *cb_priv; + void (*release)(void *cb_priv); + struct flow_block_indr indr; + unsigned int refcnt; +}; + +struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + void (*release)(void *cb_priv)); +void flow_block_cb_free(struct flow_block_cb *block_cb); + +struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block, + flow_setup_cb_t *cb, void *cb_ident); + +void *flow_block_cb_priv(struct flow_block_cb *block_cb); +void flow_block_cb_incref(struct flow_block_cb *block_cb); +unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flow_block_cb_add(struct flow_block_cb *block_cb, + struct flow_block_offload *offload) +{ + list_add_tail(&block_cb->list, &offload->cb_list); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flow_block_cb_remove(struct flow_block_cb *block_cb, + struct flow_block_offload *offload) +{ + list_move(&block_cb->list, &offload->cb_list); +} + +bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, + struct list_head *driver_block_list); + +int flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + 
flow_setup_cb_t *cb, + void *cb_ident, void *cb_priv, bool ingress_only); + +enum flow_cls_command { + FLOW_CLS_REPLACE, + FLOW_CLS_DESTROY, + FLOW_CLS_STATS, + FLOW_CLS_TMPLT_CREATE, + FLOW_CLS_TMPLT_DESTROY, +}; + +struct flow_cls_common_offload { + u32 chain_index; + __be16 protocol; + u32 prio; + struct netlink_ext_ack *extack; +}; + +struct flow_cls_offload { + struct flow_cls_common_offload common; + enum flow_cls_command command; + unsigned long cookie; + struct flow_rule *rule; + struct flow_stats stats; + u32 classid; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct flow_rule * +flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd) +{ + return flow_cmd->rule; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void flow_block_init(struct flow_block *flow_block) +{ + INIT_LIST_HEAD(&flow_block->cb_list); +} + +typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, + enum tc_setup_type type, void *type_data); + +int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv); +void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, + flow_setup_cb_t *setup_cb); +int flow_indr_dev_setup_offload(struct net_device *dev, + enum tc_setup_type type, void *data, + struct flow_block_offload *bo, + void (*cleanup)(struct flow_block_cb *block_cb)); +# 22 "./include/net/sch_generic.h" 2 + +struct Qdisc_ops; +struct qdisc_walker; +struct tcf_walker; +struct module; +struct bpf_flow_keys; + +struct qdisc_rate_table { + struct tc_ratespec rate; + u32 data[256]; + struct qdisc_rate_table *next; + int refcnt; +}; + +enum qdisc_state_t { + __QDISC_STATE_SCHED, + __QDISC_STATE_DEACTIVATED, +}; + +struct qdisc_size_table { + struct callback_head rcu; + struct list_head list; + struct tc_sizespec szopts; + int refcnt; + u16 data[]; +}; + + +struct qdisc_skb_head { + struct sk_buff *head; + struct sk_buff *tail; + __u32 qlen; + spinlock_t lock; +}; + +struct Qdisc { + int (*enqueue)(struct sk_buff *skb, + struct Qdisc *sch, + struct sk_buff **to_free); + struct sk_buff * (*dequeue)(struct Qdisc *sch); + unsigned int flags; +# 82 "./include/net/sch_generic.h" + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table *stab; + struct hlist_node hash; + u32 handle; + u32 parent; + + struct netdev_queue *dev_queue; + + struct net_rate_estimator *rate_est; + struct gnet_stats_basic_cpu *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + int padded; + refcount_t refcnt; + + + + + struct sk_buff_head gso_skb __attribute__((__aligned__((1 << (6))))); + struct qdisc_skb_head q; + struct gnet_stats_basic_packed bstats; + seqcount_t running; + struct gnet_stats_queue qstats; + unsigned long state; + struct Qdisc *next_sched; + struct sk_buff_head skb_bad_txq; + + spinlock_t busylock __attribute__((__aligned__((1 << (6))))); + spinlock_t seqlock; + + + bool empty; + struct callback_head rcu; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_refcount_inc(struct Qdisc *qdisc) +{ + if (qdisc->flags & 1) + return; + refcount_inc(&qdisc->refcnt); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc) +{ + if (qdisc->flags & 1) + return qdisc; + if (refcount_inc_not_zero(&qdisc->refcnt)) + return 
qdisc; + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qdisc_is_running(struct Qdisc *qdisc) +{ + if (qdisc->flags & 0x100) + return spin_is_locked(&qdisc->seqlock); + return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qdisc_is_percpu_stats(const struct Qdisc *q) +{ + return q->flags & 0x20; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qdisc_is_empty(const struct Qdisc *qdisc) +{ + if (qdisc_is_percpu_stats(qdisc)) + return ({ do { extern void __compiletime_assert_1396(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(qdisc->empty) == sizeof(char) || sizeof(qdisc->empty) == sizeof(short) || sizeof(qdisc->empty) == sizeof(int) || sizeof(qdisc->empty) == sizeof(long)) || sizeof(qdisc->empty) == sizeof(long long))) __compiletime_assert_1396(); } while (0); ({ typeof( _Generic((qdisc->empty), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (qdisc->empty))) __x = (*(const volatile typeof( _Generic((qdisc->empty), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (qdisc->empty))) *)&(qdisc->empty)); do { } while (0); (typeof(qdisc->empty))__x; }); }); + return !({ do { extern void __compiletime_assert_1397(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(qdisc->q.qlen) == sizeof(char) || sizeof(qdisc->q.qlen) == sizeof(short) || sizeof(qdisc->q.qlen) == sizeof(int) || sizeof(qdisc->q.qlen) == sizeof(long)) || sizeof(qdisc->q.qlen) == sizeof(long long))) __compiletime_assert_1397(); } while (0); ({ typeof( _Generic((qdisc->q.qlen), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (qdisc->q.qlen))) __x = (*(const volatile typeof( _Generic((qdisc->q.qlen), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (qdisc->q.qlen))) *)&(qdisc->q.qlen)); do { } while (0); (typeof(qdisc->q.qlen))__x; }); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qdisc_run_begin(struct Qdisc *qdisc) +{ + if (qdisc->flags & 0x100) { + if 
(!spin_trylock(&qdisc->seqlock)) + return false; + do { do { extern void __compiletime_assert_1398(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(qdisc->empty) == sizeof(char) || sizeof(qdisc->empty) == sizeof(short) || sizeof(qdisc->empty) == sizeof(int) || sizeof(qdisc->empty) == sizeof(long)) || sizeof(qdisc->empty) == sizeof(long long))) __compiletime_assert_1398(); } while (0); do { *(volatile typeof(qdisc->empty) *)&(qdisc->empty) = (false); } while (0); } while (0); + } else if (qdisc_is_running(qdisc)) { + return false; + } + + + + raw_write_seqcount_begin(&qdisc->running); + lock_acquire(&qdisc->running.dep_map, 0, 1, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0)); + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_run_end(struct Qdisc *qdisc) +{ + write_seqcount_end(&qdisc->running); + if (qdisc->flags & 0x100) + spin_unlock(&qdisc->seqlock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qdisc_may_bulk(const struct Qdisc *qdisc) +{ + return qdisc->flags & 0x10; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_avail_bulklimit(const struct netdev_queue *txq) +{ + + + return dql_avail(&txq->dql); + + + +} + +struct Qdisc_class_ops { + unsigned int flags; + + struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); + int (*graft)(struct Qdisc *, unsigned long cl, + struct Qdisc *, struct Qdisc **, + struct netlink_ext_ack *extack); + struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl); + void (*qlen_notify)(struct Qdisc *, unsigned long); + + + unsigned long (*find)(struct Qdisc *, u32 classid); + int (*change)(struct Qdisc *, u32, u32, + struct nlattr **, unsigned long *, + struct netlink_ext_ack *); + int (*delete)(struct Qdisc *, unsigned long); + void (*walk)(struct Qdisc *, struct qdisc_walker * arg); + + + struct tcf_block * (*tcf_block)(struct Qdisc *sch, + unsigned long arg, + struct netlink_ext_ack *extack); + unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, + u32 classid); + void (*unbind_tcf)(struct Qdisc *, unsigned long); + + + int (*dump)(struct Qdisc *, unsigned long, + struct sk_buff *skb, struct tcmsg*); + int (*dump_stats)(struct Qdisc *, unsigned long, + struct gnet_dump *); +}; + + + + +enum qdisc_class_ops_flags { + QDISC_CLASS_OPS_DOIT_UNLOCKED = 1, +}; + +struct Qdisc_ops { + struct Qdisc_ops *next; + const struct Qdisc_class_ops *cl_ops; + char id[16]; + int priv_size; + unsigned int static_flags; + + int (*enqueue)(struct sk_buff *skb, + struct Qdisc *sch, + struct sk_buff **to_free); + struct sk_buff * (*dequeue)(struct Qdisc *); + struct sk_buff * (*peek)(struct Qdisc *); + + int (*init)(struct Qdisc *sch, struct nlattr *arg, + struct netlink_ext_ack *extack); + void (*reset)(struct Qdisc *); + void (*destroy)(struct Qdisc *); + int (*change)(struct Qdisc *sch, + struct nlattr *arg, + struct netlink_ext_ack *extack); + void (*attach)(struct Qdisc *sch); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); + + int (*dump)(struct Qdisc *, struct sk_buff *); + int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + + void (*ingress_block_set)(struct Qdisc *sch, + u32 block_index); + void (*egress_block_set)(struct Qdisc *sch, + u32 block_index); + u32 (*ingress_block_get)(struct Qdisc *sch); + u32 
(*egress_block_get)(struct Qdisc *sch); + + struct module *owner; +}; + + +struct tcf_result { + union { + struct { + unsigned long class; + u32 classid; + }; + const struct tcf_proto *goto_tp; + + + struct { + bool ingress; + struct gnet_stats_queue *qstats; + }; + }; +}; + +struct tcf_chain; + +struct tcf_proto_ops { + struct list_head head; + char kind[16]; + + int (*classify)(struct sk_buff *, + const struct tcf_proto *, + struct tcf_result *); + int (*init)(struct tcf_proto*); + void (*destroy)(struct tcf_proto *tp, bool rtnl_held, + struct netlink_ext_ack *extack); + + void* (*get)(struct tcf_proto*, u32 handle); + void (*put)(struct tcf_proto *tp, void *f); + int (*change)(struct net *net, struct sk_buff *, + struct tcf_proto*, unsigned long, + u32 handle, struct nlattr **, + void **, bool, bool, + struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *tp, void *arg, + bool *last, bool rtnl_held, + struct netlink_ext_ack *); + bool (*delete_empty)(struct tcf_proto *tp); + void (*walk)(struct tcf_proto *tp, + struct tcf_walker *arg, bool rtnl_held); + int (*reoffload)(struct tcf_proto *tp, bool add, + flow_setup_cb_t *cb, void *cb_priv, + struct netlink_ext_ack *extack); + void (*hw_add)(struct tcf_proto *tp, + void *type_data); + void (*hw_del)(struct tcf_proto *tp, + void *type_data); + void (*bind_class)(void *, u32, unsigned long, + void *, unsigned long); + void * (*tmplt_create)(struct net *net, + struct tcf_chain *chain, + struct nlattr **tca, + struct netlink_ext_ack *extack); + void (*tmplt_destroy)(void *tmplt_priv); + + + int (*dump)(struct net*, struct tcf_proto*, void *, + struct sk_buff *skb, struct tcmsg*, + bool); + int (*terse_dump)(struct net *net, + struct tcf_proto *tp, void *fh, + struct sk_buff *skb, + struct tcmsg *t, bool rtnl_held); + int (*tmplt_dump)(struct sk_buff *skb, + struct net *net, + void *tmplt_priv); + + struct module *owner; + int flags; +}; + + + + + +enum tcf_proto_ops_flags { + TCF_PROTO_OPS_DOIT_UNLOCKED = 1, +}; + +struct tcf_proto { + + struct tcf_proto *next; + void *root; + + + int (*classify)(struct sk_buff *, + const struct tcf_proto *, + struct tcf_result *); + __be16 protocol; + + + u32 prio; + void *data; + const struct tcf_proto_ops *ops; + struct tcf_chain *chain; + + + + spinlock_t lock; + bool deleting; + refcount_t refcnt; + struct callback_head rcu; + struct hlist_node destroy_ht_node; +}; + +struct qdisc_skb_cb { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + + unsigned char data[20]; +}; + +typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv); + +struct tcf_chain { + + struct mutex filter_chain_lock; + struct tcf_proto *filter_chain; + struct list_head list; + struct tcf_block *block; + u32 index; + unsigned int refcnt; + unsigned int action_refcnt; + bool explicitly_created; + bool flushing; + const struct tcf_proto_ops *tmplt_ops; + void *tmplt_priv; + struct callback_head rcu; +}; + +struct tcf_block { + + + + struct mutex lock; + struct list_head chain_list; + u32 index; + u32 classid; + refcount_t refcnt; + struct net *net; + struct Qdisc *q; + struct rw_semaphore cb_lock; + struct flow_block flow_block; + struct list_head owner_list; + bool keep_dst; + atomic_t offloadcnt; + unsigned int nooffloaddevcnt; + unsigned int lockeddevcnt; + struct { + struct tcf_chain *chain; + struct list_head filter_chain_list; + } chain0; + struct callback_head rcu; + struct hlist_head proto_destroy_ht[1 << (7)]; + struct mutex proto_destroy_lock; +}; + + +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain) +{ + return lock_is_held(&(&chain->filter_chain_lock)->dep_map); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp) +{ + return lock_is_held(&(&tp->lock)->dep_map); +} +# 462 "./include/net/sch_generic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) +{ + struct qdisc_skb_cb *qcb; + + do { extern void __compiletime_assert_1399(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz"))); if (!(!(sizeof(skb->cb) < __builtin_offsetof(struct qdisc_skb_cb, data) + sz))) __compiletime_assert_1399(); } while (0); + do { extern void __compiletime_assert_1400(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(qcb->data) < sz"))); if (!(!(sizeof(qcb->data) < sz))) __compiletime_assert_1400(); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_qlen_cpu(const struct Qdisc *q) +{ + return ({ do { const void *__vpp_verify = (typeof((q->cpu_qstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (q->cpu_qstats)); (typeof(*(q->cpu_qstats)) *)tcp_ptr__; }); })->qlen; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_qlen(const struct Qdisc *q) +{ + return q->q.qlen; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_qlen_sum(const struct Qdisc *q) +{ + __u32 qlen = q->qstats.qlen; + int i; + + if (qdisc_is_percpu_stats(q)) { + for (((i)) = -1; ((i)) = cpumask_next(((i)), (((const struct cpumask *)&__cpu_possible_mask))), ((i)) < nr_cpu_ids;) + qlen += ({ do { const void *__vpp_verify = (typeof((q->cpu_qstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((q->cpu_qstats))) *)((q->cpu_qstats)))); (typeof((typeof(*((q->cpu_qstats))) *)((q->cpu_qstats)))) (__ptr + (((__per_cpu_offset[(i)])))); }); })->qlen; + } else { + qlen += q->q.qlen; + } + + return qlen; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) +{ + return (struct qdisc_skb_cb *)skb->cb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *qdisc_lock(struct Qdisc *qdisc) +{ + return &qdisc->q.lock; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct Qdisc *qdisc_root(const struct Qdisc *qdisc) +{ + struct Qdisc *q = ({ typeof(*(qdisc->dev_queue->qdisc)) *________p1 = (typeof(*(qdisc->dev_queue->qdisc)) *)({ do { extern void __compiletime_assert_1401(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((qdisc->dev_queue->qdisc)) == sizeof(char) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(short) || 
sizeof((qdisc->dev_queue->qdisc)) == sizeof(int) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(long)) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(long long))) __compiletime_assert_1401(); } while (0); ({ typeof( _Generic(((qdisc->dev_queue->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((qdisc->dev_queue->qdisc)))) __x = (*(const volatile typeof( _Generic(((qdisc->dev_queue->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((qdisc->dev_queue->qdisc)))) *)&((qdisc->dev_queue->qdisc))); do { } while (0); (typeof((qdisc->dev_queue->qdisc)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rtnl_is_held()) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/net/sch_generic.h", 507, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(qdisc->dev_queue->qdisc)) *)(________p1)); }); + + return q; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc) +{ + return ({ typeof(*(qdisc->dev_queue->qdisc)) *________p1 = (typeof(*(qdisc->dev_queue->qdisc)) *)({ do { extern void __compiletime_assert_1402(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((qdisc->dev_queue->qdisc)) == sizeof(char) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(short) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(int) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(long)) || sizeof((qdisc->dev_queue->qdisc)) == sizeof(long long))) __compiletime_assert_1402(); } while (0); ({ typeof( _Generic(((qdisc->dev_queue->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((qdisc->dev_queue->qdisc)))) __x = (*(const volatile typeof( _Generic(((qdisc->dev_queue->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((qdisc->dev_queue->qdisc)))) *)&((qdisc->dev_queue->qdisc))); do { } while (0); (typeof((qdisc->dev_queue->qdisc)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_bh_held()))) { __warned = true; lockdep_rcu_suspicious("include/net/sch_generic.h", 514, "suspicious 
rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(qdisc->dev_queue->qdisc)) *)(________p1)); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) +{ + return qdisc->dev_queue->qdisc_sleeping; +} +# 533 "./include/net/sch_generic.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc) +{ + struct Qdisc *root = qdisc_root(qdisc); + + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1403)); }); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/net/sch_generic.h", 537); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1404)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/sch_generic.h"), "i" (537), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1405)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1406)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1407)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + return qdisc_lock(root); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) +{ + struct Qdisc *root = qdisc_root_sleeping(qdisc); + + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1408)); }); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/net/sch_generic.h", 545); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1409)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/sch_generic.h"), "i" (545), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - 
.\n\t" ".popsection\n\t" : : "i" (1410)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1411)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1412)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + return qdisc_lock(root); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) +{ + struct Qdisc *root = qdisc_root_sleeping(qdisc); + + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1413)); }); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/net/sch_generic.h", 553); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1414)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/sch_generic.h"), "i" (553), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1415)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1416)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1417)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + return &root->running; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net_device *qdisc_dev(const struct Qdisc *qdisc) +{ + return qdisc->dev_queue->dev; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sch_tree_lock(const struct Qdisc *q) +{ + spin_lock_bh(qdisc_root_sleeping_lock(q)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sch_tree_unlock(const struct Qdisc *q) +{ + spin_unlock_bh(qdisc_root_sleeping_lock(q)); +} + +extern struct Qdisc noop_qdisc; +extern struct Qdisc_ops noop_qdisc_ops; +extern struct Qdisc_ops pfifo_fast_ops; +extern struct Qdisc_ops mq_qdisc_ops; +extern struct Qdisc_ops noqueue_qdisc_ops; +extern const struct Qdisc_ops *default_qdisc_ops; +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct Qdisc_ops * +get_default_qdisc_ops(const struct net_device *dev, int ntx) +{ + return ntx < dev->real_num_tx_queues ? 
+ default_qdisc_ops : &pfifo_fast_ops; +} + +struct Qdisc_class_common { + u32 classid; + struct hlist_node hnode; +}; + +struct Qdisc_class_hash { + struct hlist_head *hash; + unsigned int hashsize; + unsigned int hashmask; + unsigned int hashelems; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int qdisc_class_hash(u32 id, u32 mask) +{ + id ^= id >> 8; + id ^= id >> 4; + return id & mask; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct Qdisc_class_common * +qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id) +{ + struct Qdisc_class_common *cl; + unsigned int h; + + if (!id) + return ((void *)0); + + h = qdisc_class_hash(id, hash->hashmask); + for (cl = ({ typeof((&hash->hash[h])->first) ____ptr = ((&hash->hash[h])->first); ____ptr ? ({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1418(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(cl)) *)0)->hnode)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1418(); } while (0); ((typeof(*(cl)) *)(__mptr - __builtin_offsetof(typeof(*(cl)), hnode))); }) : ((void *)0); }); cl; cl = ({ typeof((cl)->hnode.next) ____ptr = ((cl)->hnode.next); ____ptr ? ({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1419(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(cl)) *)0)->hnode)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1419(); } while (0); ((typeof(*(cl)) *)(__mptr - __builtin_offsetof(typeof(*(cl)), hnode))); }) : ((void *)0); })) { + if (cl->classid == id) + return cl; + } + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int tc_classid_to_hwtc(struct net_device *dev, u32 classid) +{ + u32 hwtc = ((classid)&(0x0000FFFFU)) - 0xFFE0U; + + return (hwtc < netdev_get_num_tc(dev)) ? 
hwtc : -22; +} + +int qdisc_class_hash_init(struct Qdisc_class_hash *); +void qdisc_class_hash_insert(struct Qdisc_class_hash *, + struct Qdisc_class_common *); +void qdisc_class_hash_remove(struct Qdisc_class_hash *, + struct Qdisc_class_common *); +void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *); +void qdisc_class_hash_destroy(struct Qdisc_class_hash *); + +int dev_qdisc_change_tx_queue_len(struct net_device *dev); +void dev_init_scheduler(struct net_device *dev); +void dev_shutdown(struct net_device *dev); +void dev_activate(struct net_device *dev); +void dev_deactivate(struct net_device *dev); +void dev_deactivate_many(struct list_head *head); +struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, + struct Qdisc *qdisc); +void qdisc_reset(struct Qdisc *qdisc); +void qdisc_put(struct Qdisc *qdisc); +void qdisc_put_unlocked(struct Qdisc *qdisc); +void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len); + +int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type, + void *type_data); +void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch, + struct Qdisc *new, struct Qdisc *old, + enum tc_setup_type type, void *type_data, + struct netlink_ext_ack *extack); +# 672 "./include/net/sch_generic.h" +struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, + const struct Qdisc_ops *ops, + struct netlink_ext_ack *extack); +void qdisc_free(struct Qdisc *qdisc); +struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, + const struct Qdisc_ops *ops, u32 parentid, + struct netlink_ext_ack *extack); +void __qdisc_calculate_pkt_len(struct sk_buff *skb, + const struct qdisc_size_table *stab); +int skb_do_redirect(struct sk_buff *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_at_tc_ingress(const struct sk_buff *skb) +{ + + return skb->tc_at_ingress; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_skip_tc_classify(struct sk_buff *skb) +{ + + if (skb->tc_skip_classify) { + skb->tc_skip_classify = 0; + return true; + } + + return false; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) +{ + struct Qdisc *qdisc; + + for (; i < dev->num_tx_queues; i++) { + qdisc = ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((lockdep_rtnl_is_held())))) { __warned = true; lockdep_rcu_suspicious("include/net/sch_generic.h", 709, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(netdev_get_tx_queue(dev, i)->qdisc)) *)((netdev_get_tx_queue(dev, i)->qdisc))); }); + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } + } +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qdisc_all_tx_empty(const struct net_device *dev) +{ + unsigned int i; + + rcu_read_lock(); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + const struct Qdisc *q = ({ typeof(*(txq->qdisc)) *________p1 = (typeof(*(txq->qdisc)) *)({ do { extern void __compiletime_assert_1420(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((txq->qdisc)) 
== sizeof(char) || sizeof((txq->qdisc)) == sizeof(short) || sizeof((txq->qdisc)) == sizeof(int) || sizeof((txq->qdisc)) == sizeof(long)) || sizeof((txq->qdisc)) == sizeof(long long))) __compiletime_assert_1420(); } while (0); ({ typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) __x = (*(const volatile typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) *)&((txq->qdisc))); do { } while (0); (typeof((txq->qdisc)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/net/sch_generic.h", 726, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(txq->qdisc)) *)(________p1)); }); + + if (!qdisc_is_empty(q)) { + rcu_read_unlock(); + return false; + } + } + rcu_read_unlock(); + return true; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qdisc_tx_changing(const struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + if (({ typeof(*(txq->qdisc)) *_________p1 = (typeof(*(txq->qdisc)) *)({ do { extern void __compiletime_assert_1421(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((txq->qdisc)) == sizeof(char) || sizeof((txq->qdisc)) == sizeof(short) || sizeof((txq->qdisc)) == sizeof(int) || sizeof((txq->qdisc)) == sizeof(long)) || sizeof((txq->qdisc)) == sizeof(long long))) __compiletime_assert_1421(); } while (0); ({ typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) __x = (*(const volatile typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) *)&((txq->qdisc))); do { } while (0); (typeof((txq->qdisc)))__x; }); }); ; ((typeof(*(txq->qdisc)) *)(_________p1)); }) != txq->qdisc_sleeping) + return true; + } + return false; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool qdisc_tx_is_noop(const struct net_device *dev) +{ + unsigned int i; + + 
for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + if (({ typeof(*(txq->qdisc)) *_________p1 = (typeof(*(txq->qdisc)) *)({ do { extern void __compiletime_assert_1422(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((txq->qdisc)) == sizeof(char) || sizeof((txq->qdisc)) == sizeof(short) || sizeof((txq->qdisc)) == sizeof(int) || sizeof((txq->qdisc)) == sizeof(long)) || sizeof((txq->qdisc)) == sizeof(long long))) __compiletime_assert_1422(); } while (0); ({ typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) __x = (*(const volatile typeof( _Generic(((txq->qdisc)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((txq->qdisc)))) *)&((txq->qdisc))); do { } while (0); (typeof((txq->qdisc)))__x; }); }); ; ((typeof(*(txq->qdisc)) *)(_________p1)); }) != &noop_qdisc) + return false; + } + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int qdisc_pkt_len(const struct sk_buff *skb) +{ + return qdisc_skb_cb(skb)->pkt_len; +} + + +enum net_xmit_qdisc_t { + __NET_XMIT_STOLEN = 0x00010000, + __NET_XMIT_BYPASS = 0x00020000, +}; + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_calculate_pkt_len(struct sk_buff *skb, + const struct Qdisc *sch) +{ + + struct qdisc_size_table *stab = ({ typeof(*(sch->stab)) *________p1 = (typeof(*(sch->stab)) *)({ do { extern void __compiletime_assert_1423(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((sch->stab)) == sizeof(char) || sizeof((sch->stab)) == sizeof(short) || sizeof((sch->stab)) == sizeof(int) || sizeof((sch->stab)) == sizeof(long)) || sizeof((sch->stab)) == sizeof(long long))) __compiletime_assert_1423(); } while (0); ({ typeof( _Generic(((sch->stab)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((sch->stab)))) __x = (*(const volatile typeof( _Generic(((sch->stab)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((sch->stab)))) *)&((sch->stab))); do { } while (0); (typeof((sch->stab)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if 
(debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_bh_held()))) { __warned = true; lockdep_rcu_suspicious("include/net/sch_generic.h", 784, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(sch->stab)) *)(________p1)); }); + + if (stab) + __qdisc_calculate_pkt_len(skb, stab); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + qdisc_calculate_pkt_len(skb, sch); + return sch->enqueue(skb, sch, to_free); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void _bstats_update(struct gnet_stats_basic_packed *bstats, + __u64 bytes, __u32 packets) +{ + bstats->bytes += bytes; + bstats->packets += packets; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bstats_update(struct gnet_stats_basic_packed *bstats, + const struct sk_buff *skb) +{ + _bstats_update(bstats, + qdisc_pkt_len(skb), + skb_is_gso(skb) ? ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs : 1); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats, + __u64 bytes, __u32 packets) +{ + u64_stats_update_begin(&bstats->syncp); + _bstats_update(&bstats->bstats, bytes, packets); + u64_stats_update_end(&bstats->syncp); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats, + const struct sk_buff *skb) +{ + u64_stats_update_begin(&bstats->syncp); + bstats_update(&bstats->bstats, skb); + u64_stats_update_end(&bstats->syncp); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_bstats_cpu_update(struct Qdisc *sch, + const struct sk_buff *skb) +{ + bstats_cpu_update(({ do { const void *__vpp_verify = (typeof((sch->cpu_bstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (sch->cpu_bstats)); (typeof(*(sch->cpu_bstats)) *)tcp_ptr__; }); }), skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_bstats_update(struct Qdisc *sch, + const struct sk_buff *skb) +{ + bstats_update(&sch->bstats, skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_backlog_dec(struct Qdisc *sch, + const struct sk_buff *skb) +{ + sch->qstats.backlog -= qdisc_pkt_len(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch, + const struct sk_buff *skb) +{ + do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->backlog)) { case 1: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) && ((-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) == 1 || 
(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) == -1)) ? (int)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) && ((-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) == 1 || (-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) == -1)) ? 
(int)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) && ((-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) == 1 || (-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) == -1)) ? 
(int)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) && ((-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) == 1 || (-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) == -1)) ? 
(int)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(-(typeof(sch->cpu_qstats->backlog))(qdisc_pkt_len(skb))))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_backlog_inc(struct Qdisc *sch, + const struct sk_buff *skb) +{ + sch->qstats.backlog += qdisc_pkt_len(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch, + const struct sk_buff *skb) +{ + do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->backlog)) { case 1: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(qdisc_pkt_len(skb)) && ((qdisc_pkt_len(skb)) == 1 || (qdisc_pkt_len(skb)) == -1)) ? 
(int)(qdisc_pkt_len(skb)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (qdisc_pkt_len(skb)); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(qdisc_pkt_len(skb)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(qdisc_pkt_len(skb)) && ((qdisc_pkt_len(skb)) == 1 || (qdisc_pkt_len(skb)) == -1)) ? (int)(qdisc_pkt_len(skb)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (qdisc_pkt_len(skb)); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(qdisc_pkt_len(skb)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(qdisc_pkt_len(skb)) && ((qdisc_pkt_len(skb)) == 1 || (qdisc_pkt_len(skb)) == -1)) ? 
(int)(qdisc_pkt_len(skb)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (qdisc_pkt_len(skb)); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(qdisc_pkt_len(skb)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(qdisc_pkt_len(skb)) && ((qdisc_pkt_len(skb)) == 1 || (qdisc_pkt_len(skb)) == -1)) ? (int)(qdisc_pkt_len(skb)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (qdisc_pkt_len(skb)); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(qdisc_pkt_len(skb)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(qdisc_pkt_len(skb)))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch) +{ + do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->qlen)) { case 1: do { typedef typeof((sch->cpu_qstats->qlen)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->qlen))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((sch->cpu_qstats->qlen)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->qlen))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((sch->cpu_qstats->qlen)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->qlen))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((sch->cpu_qstats->qlen)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->qlen))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch) +{ + do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->qlen)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->qlen)) { case 1: do { typedef typeof((sch->cpu_qstats->qlen)) pao_T__; const int pao_ID__ = 
(__builtin_constant_p(-(typeof(sch->cpu_qstats->qlen))(1)) && ((-(typeof(sch->cpu_qstats->qlen))(1)) == 1 || (-(typeof(sch->cpu_qstats->qlen))(1)) == -1)) ? (int)(-(typeof(sch->cpu_qstats->qlen))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(sch->cpu_qstats->qlen))(1)); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->qlen))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "qi" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "re" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((sch->cpu_qstats->qlen)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(sch->cpu_qstats->qlen))(1)) && ((-(typeof(sch->cpu_qstats->qlen))(1)) == 1 || (-(typeof(sch->cpu_qstats->qlen))(1)) == -1)) ? 
(int)(-(typeof(sch->cpu_qstats->qlen))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(sch->cpu_qstats->qlen))(1)); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->qlen))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "qi" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "re" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((sch->cpu_qstats->qlen)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(sch->cpu_qstats->qlen))(1)) && ((-(typeof(sch->cpu_qstats->qlen))(1)) == 1 || (-(typeof(sch->cpu_qstats->qlen))(1)) == -1)) ? 
(int)(-(typeof(sch->cpu_qstats->qlen))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(sch->cpu_qstats->qlen))(1)); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->qlen))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "qi" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "re" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((sch->cpu_qstats->qlen)) pao_T__; const int pao_ID__ = (__builtin_constant_p(-(typeof(sch->cpu_qstats->qlen))(1)) && ((-(typeof(sch->cpu_qstats->qlen))(1)) == 1 || (-(typeof(sch->cpu_qstats->qlen))(1)) == -1)) ? 
(int)(-(typeof(sch->cpu_qstats->qlen))(1)) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (-(typeof(sch->cpu_qstats->qlen))(1)); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->qlen))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "qi" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "ri" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->qlen)) : "re" ((pao_T__)(-(typeof(sch->cpu_qstats->qlen))(1)))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch) +{ + do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->requeues)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->requeues)) { case 1: do { typedef typeof((sch->cpu_qstats->requeues)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->requeues))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((sch->cpu_qstats->requeues)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->requeues))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((sch->cpu_qstats->requeues)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->requeues))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((sch->cpu_qstats->requeues)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->requeues))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->requeues)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __qdisc_qstats_drop(struct Qdisc *sch, int count) +{ + sch->qstats.drops += count; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qstats_drop_inc(struct 
gnet_stats_queue *qstats) +{ + qstats->drops++; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qstats_overlimit_inc(struct gnet_stats_queue *qstats) +{ + qstats->overlimits++; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_drop(struct Qdisc *sch) +{ + qstats_drop_inc(&sch->qstats); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_cpu_drop(struct Qdisc *sch) +{ + do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->drops)) { case 1: do { typedef typeof((sch->cpu_qstats->drops)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->drops))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((sch->cpu_qstats->drops)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->drops))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((sch->cpu_qstats->drops)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->drops))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((sch->cpu_qstats->drops)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->drops))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->drops)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_overlimit(struct Qdisc *sch) +{ + sch->qstats.overlimits++; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch) +{ + __u32 qlen = qdisc_qlen_sum(sch); + + return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen, + __u32 *backlog) +{ + struct gnet_stats_queue qstats = { 0 }; + __u32 len = qdisc_qlen_sum(sch); + + __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len); + *qlen = qstats.qlen; + *backlog = qstats.backlog; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_tree_flush_backlog(struct Qdisc *sch) +{ + __u32 qlen, backlog; + + qdisc_qstats_qlen_backlog(sch, &qlen, &backlog); + qdisc_tree_reduce_backlog(sch, qlen, backlog); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_purge_queue(struct Qdisc *sch) +{ + __u32 qlen, backlog; + + qdisc_qstats_qlen_backlog(sch, &qlen, &backlog); + qdisc_reset(sch); + qdisc_tree_reduce_backlog(sch, qlen, backlog); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_skb_head_init(struct qdisc_skb_head *qh) +{ + qh->head = ((void *)0); + qh->tail = ((void *)0); + qh->qlen = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __qdisc_enqueue_tail(struct sk_buff *skb, + struct qdisc_skb_head *qh) +{ + struct sk_buff *last = qh->tail; + + if (last) { + skb->next = ((void *)0); + 
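/* queue non-empty: chain the new skb behind the current tail, then advance the tail pointer */ +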
last->next = skb; + qh->tail = skb; + } else { + qh->tail = skb; + qh->head = skb; + } + qh->qlen++; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) +{ + __qdisc_enqueue_tail(skb, &sch->q); + qdisc_qstats_backlog_inc(sch, skb); + return 0x00; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __qdisc_enqueue_head(struct sk_buff *skb, + struct qdisc_skb_head *qh) +{ + skb->next = qh->head; + + if (!qh->head) + qh->tail = skb; + qh->head = skb; + qh->qlen++; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh) +{ + struct sk_buff *skb = qh->head; + + if (__builtin_expect(!!(skb != ((void *)0)), 1)) { + qh->head = skb->next; + qh->qlen--; + if (qh->head == ((void *)0)) + qh->tail = ((void *)0); + skb->next = ((void *)0); + } + + return skb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) +{ + struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); + + if (__builtin_expect(!!(skb != ((void *)0)), 1)) { + qdisc_qstats_backlog_dec(sch, skb); + qdisc_bstats_update(sch, skb); + } + + return skb; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free) +{ + skb->next = *to_free; + *to_free = skb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __qdisc_drop_all(struct sk_buff *skb, + struct sk_buff **to_free) +{ + if (skb->prev) + skb->prev->next = *to_free; + else + skb->next = *to_free; + *to_free = skb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, + struct qdisc_skb_head *qh, + struct sk_buff **to_free) +{ + struct sk_buff *skb = __qdisc_dequeue_head(qh); + + if (__builtin_expect(!!(skb != ((void *)0)), 1)) { + unsigned int len = qdisc_pkt_len(skb); + + qdisc_qstats_backlog_dec(sch, skb); + __qdisc_drop(skb, to_free); + return len; + } + + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int qdisc_queue_drop_head(struct Qdisc *sch, + struct sk_buff **to_free) +{ + return __qdisc_queue_drop_head(sch, &sch->q, to_free); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *qdisc_peek_head(struct Qdisc *sch) +{ + const struct qdisc_skb_head *qh = &sch->q; + + return qh->head; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) +{ + struct sk_buff *skb = skb_peek(&sch->gso_skb); + + + if (!skb) { + skb = sch->dequeue(sch); + + if (skb) { + __skb_queue_head(&sch->gso_skb, skb); + + qdisc_qstats_backlog_inc(sch, skb); + sch->q.qlen++; + } + } + + return skb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_update_stats_at_dequeue(struct 
Qdisc *sch, + struct sk_buff *skb) +{ + if (qdisc_is_percpu_stats(sch)) { + qdisc_qstats_cpu_backlog_dec(sch, skb); + qdisc_bstats_cpu_update(sch, skb); + qdisc_qstats_cpu_qlen_dec(sch); + } else { + qdisc_qstats_backlog_dec(sch, skb); + qdisc_bstats_update(sch, skb); + sch->q.qlen--; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_update_stats_at_enqueue(struct Qdisc *sch, + unsigned int pkt_len) +{ + if (qdisc_is_percpu_stats(sch)) { + qdisc_qstats_cpu_qlen_inc(sch); + do { do { const void *__vpp_verify = (typeof((&(sch->cpu_qstats->backlog)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(sch->cpu_qstats->backlog)) { case 1: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(pkt_len) && ((pkt_len) == 1 || (pkt_len) == -1)) ? (int)(pkt_len) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (pkt_len); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(pkt_len))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(pkt_len))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(pkt_len))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(pkt_len))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(pkt_len) && ((pkt_len) == 1 || (pkt_len) == -1)) ? 
(int)(pkt_len) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (pkt_len); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(pkt_len))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(pkt_len))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(pkt_len))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(pkt_len))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(pkt_len) && ((pkt_len) == 1 || (pkt_len) == -1)) ? (int)(pkt_len) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (pkt_len); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(pkt_len))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(pkt_len))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(pkt_len))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(pkt_len))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((sch->cpu_qstats->backlog)) pao_T__; const int pao_ID__ = (__builtin_constant_p(pkt_len) && ((pkt_len) == 1 || (pkt_len) == -1)) ? 
(int)(pkt_len) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (pkt_len); (void)pao_tmp__; } switch (sizeof((sch->cpu_qstats->backlog))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "qi" ((pao_T__)(pkt_len))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(pkt_len))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "ri" ((pao_T__)(pkt_len))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((sch->cpu_qstats->backlog)) : "re" ((pao_T__)(pkt_len))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); + } else { + sch->qstats.backlog += pkt_len; + sch->q.qlen++; + } +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) +{ + struct sk_buff *skb = skb_peek(&sch->gso_skb); + + if (skb) { + skb = __skb_dequeue(&sch->gso_skb); + if (qdisc_is_percpu_stats(sch)) { + qdisc_qstats_cpu_backlog_dec(sch, skb); + qdisc_qstats_cpu_qlen_dec(sch); + } else { + qdisc_qstats_backlog_dec(sch, skb); + sch->q.qlen--; + } + } else { + skb = sch->dequeue(sch); + } + + return skb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __qdisc_reset_queue(struct qdisc_skb_head *qh) +{ + + + + + ({ static bool __attribute__((__section__(".data.once"))) __warned; int __ret_warn_once = !!(!rtnl_is_locked()); if (__builtin_expect(!!(__ret_warn_once && !__warned), 0)) { __warned = true; ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1424)); }); __warn_printk("RTNL: assertion failed at %s (%d)\n", "include/net/sch_generic.h", 1135); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1425)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/sch_generic.h"), "i" (1135), "i" ((1 << 0)|((1 << 3) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1426)); }); ({ asm 
volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1427)); }); } while (0); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1428)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } __builtin_expect(!!(__ret_warn_once), 0); }); + if (qh->qlen) { + rtnl_kfree_skbs(qh->head, qh->tail); + + qh->head = ((void *)0); + qh->tail = ((void *)0); + qh->qlen = 0; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void qdisc_reset_queue(struct Qdisc *sch) +{ + __qdisc_reset_queue(&sch->q); + sch->qstats.backlog = 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, + struct Qdisc **pold) +{ + struct Qdisc *old; + + sch_tree_lock(sch); + old = *pold; + *pold = new; + if (old != ((void *)0)) + qdisc_tree_flush_backlog(old); + sch_tree_unlock(sch); + + return old; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) +{ + rtnl_kfree_skbs(skb, skb); + qdisc_qstats_drop(sch); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + __qdisc_drop(skb, to_free); + qdisc_qstats_cpu_drop(sch); + + return 0x01; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + __qdisc_drop(skb, to_free); + qdisc_qstats_drop(sch); + + return 0x01; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + __qdisc_drop_all(skb, to_free); + qdisc_qstats_drop(sch); + + return 0x01; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen) +{ + int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead; + if (slot < 0) + slot = 0; + slot >>= rtab->rate.cell_log; + if (slot > 255) + return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]; + return rtab->data[slot]; +} + +struct psched_ratecfg { + u64 rate_bytes_ps; + u32 mult; + u16 overhead; + u8 linklayer; + u8 shift; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 psched_l2t_ns(const struct psched_ratecfg *r, + unsigned int len) +{ + len += r->overhead; + + if (__builtin_expect(!!(r->linklayer == TC_LINKLAYER_ATM), 0)) + return ((u64)((((len) + (48) - 1) / (48))*53) * r->mult) >> r->shift; + + return ((u64)len * r->mult) >> r->shift; +} + +void psched_ratecfg_precompute(struct psched_ratecfg *r, + const struct tc_ratespec *conf, + u64 rate64); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void psched_ratecfg_getrate(struct tc_ratespec *res, + const struct psched_ratecfg *r) +{ + memset(res, 0, sizeof(*res)); + + + + + + res->rate = 
__builtin_choose_expr(((!!(sizeof((typeof((u64)(r->rate_bytes_ps)) *)1 == (typeof((u64)(~0U)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(r->rate_bytes_ps)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u64)(~0U)) * 0l)) : (int *)8))))), (((u64)(r->rate_bytes_ps)) < ((u64)(~0U)) ? ((u64)(r->rate_bytes_ps)) : ((u64)(~0U))), ({ typeof((u64)(r->rate_bytes_ps)) __UNIQUE_ID___x1429 = ((u64)(r->rate_bytes_ps)); typeof((u64)(~0U)) __UNIQUE_ID___y1430 = ((u64)(~0U)); ((__UNIQUE_ID___x1429) < (__UNIQUE_ID___y1430) ? (__UNIQUE_ID___x1429) : (__UNIQUE_ID___y1430)); })); + + res->overhead = r->overhead; + res->linklayer = (r->linklayer & 0x0F); +} + + + + +struct mini_Qdisc { + struct tcf_proto *filter_list; + struct tcf_block *block; + struct gnet_stats_basic_cpu *cpu_bstats; + struct gnet_stats_queue *cpu_qstats; + struct callback_head rcu; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq, + const struct sk_buff *skb) +{ + bstats_cpu_update(({ do { const void *__vpp_verify = (typeof((miniq->cpu_bstats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (miniq->cpu_bstats)); (typeof(*(miniq->cpu_bstats)) *)tcp_ptr__; }); }), skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq) +{ + do { do { const void *__vpp_verify = (typeof((&(miniq->cpu_qstats->drops)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(miniq->cpu_qstats->drops)) { case 1: do { typedef typeof((miniq->cpu_qstats->drops)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((miniq->cpu_qstats->drops))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((miniq->cpu_qstats->drops)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((miniq->cpu_qstats->drops))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((miniq->cpu_qstats->drops)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? (int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((miniq->cpu_qstats->drops))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((miniq->cpu_qstats->drops)) pao_T__; const int pao_ID__ = (__builtin_constant_p(1) && ((1) == 1 || (1) == -1)) ? 
(int)(1) : 0; if (0) { pao_T__ pao_tmp__; pao_tmp__ = (1); (void)pao_tmp__; } switch (sizeof((miniq->cpu_qstats->drops))) { case 1: if (pao_ID__ == 1) asm volatile ("incb ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decb ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addb %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "qi" ((pao_T__)(1))); break; case 2: if (pao_ID__ == 1) asm volatile ("incw ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decw ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addw %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 4: if (pao_ID__ == 1) asm volatile ("incl ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decl ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addl %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "ri" ((pao_T__)(1))); break; case 8: if (pao_ID__ == 1) asm volatile ("incq ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else if (pao_ID__ == -1) asm volatile ("decq ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops))); else asm volatile ("addq %1, ""%%""gs"":" "%" "0" : "+m" ((miniq->cpu_qstats->drops)) : "re" ((pao_T__)(1))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +struct mini_Qdisc_pair { + struct mini_Qdisc miniq1; + struct mini_Qdisc miniq2; + struct mini_Qdisc **p_miniq; +}; + +void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, + struct tcf_proto *tp_head); +void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, + struct mini_Qdisc **p_miniq); +void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, + struct tcf_block *block); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res) +{ + return res->ingress ? 
netif_receive_skb(skb) : dev_queue_xmit(skb); +} +# 26 "./include/linux/filter.h" 2 + + +# 1 "./include/uapi/linux/filter.h" 1 +# 24 "./include/uapi/linux/filter.h" +struct sock_filter { + __u16 code; + __u8 jt; + __u8 jf; + __u32 k; +}; + +struct sock_fprog { + unsigned short len; + struct sock_filter *filter; +}; +# 29 "./include/linux/filter.h" 2 + + +struct sk_buff; +struct sock; +struct seccomp_data; +struct bpf_prog_aux; +struct xdp_rxq_info; +struct xdp_buff; +struct sock_reuseport; +struct ctl_table; +struct ctl_table_header; +# 176 "./include/linux/filter.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool insn_is_zext(const struct bpf_insn *insn) +{ + return insn->code == (0x04 | 0xb0 | 0x08) && insn->imm == 1; +} +# 507 "./include/linux/filter.h" +struct compat_sock_fprog { + u16 len; + compat_uptr_t filter; +}; + + +struct sock_fprog_kern { + u16 len; + struct sock_filter *filter; +}; + + + + +struct bpf_binary_header { + u32 pages; + u8 image[] __attribute__((__aligned__(8))); +}; + +struct bpf_prog { + u16 pages; + u16 jited:1, + jit_requested:1, + gpl_compatible:1, + cb_access:1, + dst_needed:1, + blinded:1, + is_func:1, + kprobe_override:1, + has_callchain_buf:1, + enforce_expected_attach_type:1; + enum bpf_prog_type type; + enum bpf_attach_type expected_attach_type; + u32 len; + u32 jited_len; + u8 tag[8]; + struct bpf_prog_aux *aux; + struct sock_fprog_kern *orig_prog; + unsigned int (*bpf_func)(const void *ctx, + const struct bpf_insn *insn); + + struct sock_filter insns[0]; + struct bpf_insn insnsi[]; +}; + +struct sk_filter { + refcount_t refcnt; + struct callback_head rcu; + struct bpf_prog *prog; +}; + +extern struct static_key_false bpf_stats_enabled_key; +# 591 "./include/linux/filter.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, + const void *ctx) +{ + u32 ret; + + migrate_disable(); + ret = ({ u32 ret; do { __cant_sleep("include/linux/filter.h", 597, 0); } while (0); if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&bpf_stats_enabled_key), struct static_key_true)) branch = arch_static_branch_jump(&(&bpf_stats_enabled_key)->key, false); else if (__builtin_types_compatible_p(typeof(*&bpf_stats_enabled_key), struct static_key_false)) branch = arch_static_branch(&(&bpf_stats_enabled_key)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); })) { struct bpf_prog_stats *stats; u64 start = sched_clock(); ret = bpf_dispatcher_nop_func(ctx, (prog)->insnsi, (prog)->bpf_func); stats = ({ do { const void *__vpp_verify = (typeof((prog->aux->stats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (prog->aux->stats)); (typeof(*(prog->aux->stats)) *)tcp_ptr__; }); }); u64_stats_update_begin(&stats->syncp); stats->cnt++; stats->nsecs += sched_clock() - start; u64_stats_update_end(&stats->syncp); } else { ret = bpf_dispatcher_nop_func(ctx, (prog)->insnsi, (prog)->bpf_func); } ret; }); + migrate_enable(); + return ret; +} + + + +struct bpf_skb_data_end { + struct qdisc_skb_cb qdisc_cb; + void *data_meta; + void *data_end; +}; + +struct bpf_redirect_info { + u32 flags; + u32 tgt_index; + void *tgt_value; + struct bpf_map *map; + u32 kern_flags; +}; + +extern __attribute__((section(".discard"), unused)) char 
__pcpu_scope_bpf_redirect_info; extern __attribute__((section(".data..percpu" ""))) __typeof__(struct bpf_redirect_info) bpf_redirect_info; +# 629 "./include/linux/filter.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_compute_data_pointers(struct sk_buff *skb) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + do { extern void __compiletime_assert_1431(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(*cb) > sizeof_field(struct sk_buff, cb)"))); if (!(!(sizeof(*cb) > sizeof((((struct sk_buff *)0)->cb))))) __compiletime_assert_1431(); } while (0); + cb->data_meta = skb->data - skb_metadata_len(skb); + cb->data_end = skb->data + skb_headlen(skb); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_compute_and_save_data_end( + struct sk_buff *skb, void **saved_data_end) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + *saved_data_end = cb->data_end; + cb->data_end = skb->data + skb_headlen(skb); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_restore_data_end( + struct sk_buff *skb, void *saved_data_end) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + cb->data_end = saved_data_end; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u8 *bpf_skb_cb(struct sk_buff *skb) +{ +# 671 "./include/linux/filter.h" + do { extern void __compiletime_assert_1432(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN"))); if (!(!(sizeof((((struct __sk_buff *)0)->cb)) != 20))) __compiletime_assert_1432(); } while (0); + do { extern void __compiletime_assert_1433(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof_field(struct __sk_buff, cb) != sizeof_field(struct qdisc_skb_cb, data)"))); if (!(!(sizeof((((struct __sk_buff *)0)->cb)) != sizeof((((struct qdisc_skb_cb *)0)->data))))) __compiletime_assert_1433(); } while (0) + ; + + return qdisc_skb_cb(skb)->data; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u8 *cb_data = bpf_skb_cb(skb); + u8 cb_saved[20]; + u32 res; + + if (__builtin_expect(!!(prog->cb_access), 0)) { + memcpy(cb_saved, cb_data, sizeof(cb_saved)); + memset(cb_data, 0, sizeof(cb_saved)); + } + + res = ({ u32 ret; do { __cant_sleep("include/linux/filter.h", 691, 0); } while (0); if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&bpf_stats_enabled_key), struct static_key_true)) branch = arch_static_branch_jump(&(&bpf_stats_enabled_key)->key, false); else if (__builtin_types_compatible_p(typeof(*&bpf_stats_enabled_key), struct static_key_false)) branch = arch_static_branch(&(&bpf_stats_enabled_key)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); })) { struct bpf_prog_stats *stats; u64 start = sched_clock(); ret = bpf_dispatcher_nop_func(skb, (prog)->insnsi, (prog)->bpf_func); stats = ({ do { const void *__vpp_verify = (typeof((prog->aux->stats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" 
(prog->aux->stats)); (typeof(*(prog->aux->stats)) *)tcp_ptr__; }); }); u64_stats_update_begin(&stats->syncp); stats->cnt++; stats->nsecs += sched_clock() - start; u64_stats_update_end(&stats->syncp); } else { ret = bpf_dispatcher_nop_func(skb, (prog)->insnsi, (prog)->bpf_func); } ret; }); + + if (__builtin_expect(!!(prog->cb_access), 0)) + memcpy(cb_data, cb_saved, sizeof(cb_saved)); + + return res; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u32 res; + + migrate_disable(); + res = __bpf_prog_run_save_cb(prog, skb); + migrate_enable(); + return res; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u8 *cb_data = bpf_skb_cb(skb); + u32 res; + + if (__builtin_expect(!!(prog->cb_access), 0)) + memset(cb_data, 0, 20); + + res = bpf_prog_run_pin_on_cpu(prog, skb); + return res; +} + +unsigned int bpf_dispatcher_xdp_func( const void *ctx, const struct bpf_insn *insnsi, unsigned int (*bpf_func)(const void *, const struct bpf_insn *)); extern struct bpf_dispatcher bpf_dispatcher_xdp; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) +{ + + + + + + + return ({ u32 ret; do { __cant_sleep("include/linux/filter.h", 734, 0); } while (0); if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&bpf_stats_enabled_key), struct static_key_true)) branch = arch_static_branch_jump(&(&bpf_stats_enabled_key)->key, false); else if (__builtin_types_compatible_p(typeof(*&bpf_stats_enabled_key), struct static_key_false)) branch = arch_static_branch(&(&bpf_stats_enabled_key)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); })) { struct bpf_prog_stats *stats; u64 start = sched_clock(); ret = bpf_dispatcher_xdp_func(xdp, (prog)->insnsi, (prog)->bpf_func); stats = ({ do { const void *__vpp_verify = (typeof((prog->aux->stats) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (prog->aux->stats)); (typeof(*(prog->aux->stats)) *)tcp_ptr__; }); }); u64_stats_update_begin(&stats->syncp); stats->cnt++; stats->nsecs += sched_clock() - start; u64_stats_update_end(&stats->syncp); } else { ret = bpf_dispatcher_xdp_func(xdp, (prog)->insnsi, (prog)->bpf_func); } ret; }); +} + +void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 bpf_prog_insn_size(const struct bpf_prog *prog) +{ + return prog->len * sizeof(struct bpf_insn); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) +{ + return ((((bpf_prog_insn_size(prog) + sizeof(__be64) + 1)-1) | ((__typeof__(bpf_prog_insn_size(prog) + sizeof(__be64) + 1))((64)-1)))+1) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int bpf_prog_size(unsigned int proglen) +{ + return 
__builtin_choose_expr(((!!(sizeof((typeof(sizeof(struct bpf_prog)) *)1 == (typeof(__builtin_offsetof(struct bpf_prog, insns[proglen])) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(sizeof(struct bpf_prog)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(__builtin_offsetof(struct bpf_prog, insns[proglen])) * 0l)) : (int *)8))))), ((sizeof(struct bpf_prog)) > (__builtin_offsetof(struct bpf_prog, insns[proglen])) ? (sizeof(struct bpf_prog)) : (__builtin_offsetof(struct bpf_prog, insns[proglen]))), ({ typeof(sizeof(struct bpf_prog)) __UNIQUE_ID___x1434 = (sizeof(struct bpf_prog)); typeof(__builtin_offsetof(struct bpf_prog, insns[proglen])) __UNIQUE_ID___y1435 = (__builtin_offsetof(struct bpf_prog, insns[proglen])); ((__UNIQUE_ID___x1434) > (__UNIQUE_ID___y1435) ? (__UNIQUE_ID___x1434) : (__UNIQUE_ID___y1435)); })) + ; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_prog_was_classic(const struct bpf_prog *prog) +{ + + + + + + return prog->type == BPF_PROG_TYPE_UNSPEC; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 bpf_ctx_off_adjust_machine(u32 size) +{ + const u32 size_machine = sizeof(unsigned long); + + if (size > size_machine && size % size_machine == 0) + size = size_machine; + + return size; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) +{ + return size <= size_default && (size & (size - 1)) == 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u8 +bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) +{ + u8 access_off = off & (size_default - 1); + + + return access_off; + + + +} +# 802 "./include/linux/filter.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_prog_lock_ro(struct bpf_prog *fp) +{ + + + + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +{ + set_vm_flush_reset_perms(hdr); + set_memory_ro((unsigned long)hdr, hdr->pages); + set_memory_x((unsigned long)hdr, hdr->pages); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct bpf_binary_header * +bpf_jit_binary_hdr(const struct bpf_prog *fp) +{ + unsigned long real_start = (unsigned long)fp->bpf_func; + unsigned long addr = real_start & (~(((1UL) << 12)-1)); + + return (void *)addr; +} + +int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_filter(struct sock *sk, struct sk_buff *skb) +{ + return sk_filter_trim_cap(sk, skb, 1); +} + +struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); +void bpf_prog_free(struct bpf_prog *fp); + +bool bpf_opcode_in_insntable(u8 code); + +void bpf_prog_free_linfo(struct bpf_prog *prog); +void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, + const u32 *insn_to_jit_off); +int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); +void bpf_prog_free_jited_linfo(struct bpf_prog *prog); +void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); 
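+/* prototypes below: allocation, reallocation and freeing of struct bpf_prog */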
+ +struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); +struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags); +struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags); +void __bpf_prog_free(struct bpf_prog *fp); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_prog_unlock_free(struct bpf_prog *fp) +{ + __bpf_prog_free(fp); +} + +typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, + unsigned int flen); + +int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); +int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, + bpf_aux_classic_check_t trans, bool save_orig); +void bpf_prog_destroy(struct bpf_prog *fp); + +int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_attach_bpf(u32 ufd, struct sock *sk); +int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); +void sk_reuseport_prog_free(struct bpf_prog *prog); +int sk_detach_filter(struct sock *sk); +int sk_get_filter(struct sock *sk, struct sock_filter *filter, + unsigned int len); + +bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); +void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); + +u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + + + + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); +void bpf_jit_compile(struct bpf_prog *prog); +bool bpf_jit_needs_zext(void); +bool bpf_helper_changes_pkt_data(void *func); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_dump_raw_ok(void) +{ + + + + return kallsyms_show_value() == 1; +} + +struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, + const struct bpf_insn *patch, u32 len); +int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); + +void bpf_clear_redirect_map(struct bpf_map *map); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool xdp_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = ({ do { const void *__vpp_verify = (typeof((&bpf_redirect_info) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&bpf_redirect_info)); (typeof(*(&bpf_redirect_info)) *)tcp_ptr__; }); }); + + return ri->kern_flags & ((((1UL))) << (0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void xdp_set_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = ({ do { const void *__vpp_verify = (typeof((&bpf_redirect_info) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&bpf_redirect_info)); (typeof(*(&bpf_redirect_info)) *)tcp_ptr__; }); }); + + ri->kern_flags |= ((((1UL))) << (0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void xdp_clear_return_frame_no_direct(void) +{ + struct bpf_redirect_info *ri = ({ do { const void *__vpp_verify = (typeof((&bpf_redirect_info) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " 
"%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&bpf_redirect_info)); (typeof(*(&bpf_redirect_info)) *)tcp_ptr__; }); }); + + ri->kern_flags &= ~((((1UL))) << (0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int xdp_ok_fwd_dev(const struct net_device *fwd, + unsigned int pktlen) +{ + unsigned int len; + + if (__builtin_expect(!!(!(fwd->flags & IFF_UP)), 0)) + return -100; + + len = fwd->mtu + fwd->hard_header_len + 4; + if (pktlen > len) + return -90; + + return 0; +} + + + + + + + +int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, + struct xdp_buff *xdp, struct bpf_prog *prog); +int xdp_do_redirect(struct net_device *dev, + struct xdp_buff *xdp, + struct bpf_prog *prog); +void xdp_do_flush(void); + + + + + + + +void bpf_warn_invalid_xdp_action(u32 act); + + +struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, + struct bpf_prog *prog, struct sk_buff *skb, + u32 hash); +# 973 "./include/linux/filter.h" +extern int bpf_jit_enable; +extern int bpf_jit_harden; +extern int bpf_jit_kallsyms; +extern long bpf_jit_limit; + +typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); + +struct bpf_binary_header * +bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns); +void bpf_jit_binary_free(struct bpf_binary_header *hdr); +u64 bpf_jit_alloc_exec_limit(void); +void *bpf_jit_alloc_exec(unsigned long size); +void bpf_jit_free_exec(void *addr); +void bpf_jit_free(struct bpf_prog *fp); + +int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, + struct bpf_jit_poke_descriptor *poke); + +int bpf_jit_get_func_addr(const struct bpf_prog *prog, + const struct bpf_insn *insn, bool extra_pass, + u64 *func_addr, bool *func_addr_fixed); + +struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); +void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_jit_dump(unsigned int flen, unsigned int proglen, + u32 pass, void *image) +{ + printk("\001" "3" "flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen, proglen, pass, image, get_current()->comm, task_pid_nr(get_current())) + ; + + if (image) + print_hex_dump("\001" "3", "JIT code: ", DUMP_PREFIX_OFFSET, + 16, 1, image, proglen, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_jit_is_ebpf(void) +{ + + return true; + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool ebpf_jit_enabled(void) +{ + return bpf_jit_enable && bpf_jit_is_ebpf(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) +{ + return fp->jited && bpf_jit_is_ebpf(); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_jit_blinding_enabled(struct bpf_prog *prog) +{ + + + + + if (!bpf_jit_is_ebpf()) + return false; + if (!prog->jit_requested) + return false; + if (!bpf_jit_harden) + return false; + if (bpf_jit_harden == 1 && capable(21)) + return false; + + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) bool bpf_jit_kallsyms_enabled(void) +{ + + + + if (bpf_jit_harden) + return false; + if (!bpf_jit_kallsyms) + return false; + if (bpf_jit_kallsyms == 1) + return true; + + return false; +} + +const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym); +bool is_bpf_text_address(unsigned long addr); +int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + const char *ret = __bpf_address_lookup(addr, size, off, sym); + + if (ret && modname) + *modname = ((void *)0); + return ret; +} + +void bpf_prog_kallsyms_add(struct bpf_prog *fp); +void bpf_prog_kallsyms_del(struct bpf_prog *fp); +# 1152 "./include/linux/filter.h" +void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bpf_needs_clear_a(const struct sock_filter *first) +{ + switch (first->code) { + case 0x06 | 0x00: + case 0x00 | 0x00 | 0x80: + return false; + + case 0x00 | 0x00 | 0x20: + case 0x00 | 0x08 | 0x20: + case 0x00 | 0x10 | 0x20: + if (first->k == (-0x1000) + 40) + return true; + return false; + + default: + return true; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u16 bpf_anc_helper(const struct sock_filter *ftest) +{ + do { if (__builtin_expect(!!(ftest->code & ((((1UL))) << (15))), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1436)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/filter.h"), "i" (1177), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1437)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + switch (ftest->code) { + case 0x00 | 0x00 | 0x20: + case 0x00 | 0x08 | 0x20: + case 0x00 | 0x10 | 0x20: + + + switch (ftest->k) { + case (-0x1000) + 0: return ((((1UL))) << (15)) | 0; + case (-0x1000) + 4: return ((((1UL))) << (15)) | 4; + case (-0x1000) + 8: return ((((1UL))) << (15)) | 8; + case (-0x1000) + 12: return ((((1UL))) << (15)) | 12; + case (-0x1000) + 16: return ((((1UL))) << (15)) | 16; + case (-0x1000) + 20: return ((((1UL))) << (15)) | 20; + case (-0x1000) + 24: return ((((1UL))) << (15)) | 24; + case (-0x1000) + 28: return ((((1UL))) << (15)) | 28; + case (-0x1000) + 32: return ((((1UL))) << (15)) | 32; + case (-0x1000) + 36: return ((((1UL))) << (15)) | 36; + case (-0x1000) + 40: return ((((1UL))) << (15)) | 40; + case (-0x1000) + 44: return ((((1UL))) << (15)) | 44; + case (-0x1000) + 48: return ((((1UL))) << (15)) | 48; + case (-0x1000) + 52: return ((((1UL))) << (15)) | 52; + case (-0x1000) + 56: return ((((1UL))) << (15)) | 56; + case (-0x1000) + 60: return ((((1UL))) << (15)) | 60; + } + + default: + return ftest->code; + } 
+} + +void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, + int k, unsigned int size); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *bpf_load_pointer(const struct sk_buff *skb, int k, + unsigned int size, void *buffer) +{ + if (k >= 0) + return skb_header_pointer(skb, k, size, buffer); + + return bpf_internal_load_pointer_neg_helper(skb, k, size); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int bpf_tell_extensions(void) +{ + return 64; +} + +struct bpf_sock_addr_kern { + struct sock *sk; + struct sockaddr *uaddr; + + + + + u64 tmp_reg; + void *t_ctx; +}; + +struct bpf_sock_ops_kern { + struct sock *sk; + u32 op; + union { + u32 args[4]; + u32 reply; + u32 replylong[4]; + }; + u32 is_fullsock; + u64 temp; +# 1255 "./include/linux/filter.h" +}; + +struct bpf_sysctl_kern { + struct ctl_table_header *head; + struct ctl_table *table; + void *cur_val; + size_t cur_len; + void *new_val; + size_t new_len; + int new_updated; + int write; + loff_t *ppos; + + u64 tmp_reg; +}; + +struct bpf_sockopt_kern { + struct sock *sk; + u8 *optval; + u8 *optval_end; + s32 level; + s32 optname; + s32 optlen; + s32 retval; +}; +# 60 "./include/net/sock.h" 2 +# 1 "./include/linux/rculist_nulls.h" 1 +# 33 "./include/linux/rculist_nulls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) +{ + if (!hlist_nulls_unhashed(n)) { + __hlist_nulls_del(n); + do { do { extern void __compiletime_assert_1438(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_1438(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *)0)); } while (0); } while (0); + } +} +# 74 "./include/linux/rculist_nulls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_nulls_del_rcu(struct hlist_nulls_node *n) +{ + __hlist_nulls_del(n); + do { do { extern void __compiletime_assert_1439(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_1439(); } while (0); do { *(volatile typeof(n->pprev) *)&(n->pprev) = (((void *) 0x122 + (0xdead000000000000UL))); } while (0); } while (0); +} +# 99 "./include/linux/rculist_nulls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *first = h->first; + + n->next = first; + do { do { extern void __compiletime_assert_1440(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(n->pprev) == sizeof(char) || sizeof(n->pprev) == sizeof(short) || sizeof(n->pprev) == sizeof(int) || sizeof(n->pprev) == sizeof(long)) || sizeof(n->pprev) == sizeof(long long))) __compiletime_assert_1440(); } while (0); do { *(volatile typeof(n->pprev) 
*)&(n->pprev) = (&h->first); } while (0); } while (0); + do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_1441(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(char) || sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(short) || sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(int) || sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(long)) || sizeof(((*((struct hlist_nulls_node **)&(h)->first)))) == sizeof(long long))) __compiletime_assert_1441(); } while (0); do { *(volatile typeof(((*((struct hlist_nulls_node **)&(h)->first)))) *)&(((*((struct hlist_nulls_node **)&(h)->first)))) = ((typeof((*((struct hlist_nulls_node **)&(h)->first))))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_1442(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(char) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(short) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(int) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(long)))) __compiletime_assert_1442(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1443(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(char) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(short) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(int) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(long)) || sizeof(*&(*((struct hlist_nulls_node **)&(h)->first))) == sizeof(long long))) __compiletime_assert_1443(); } while (0); do { *(volatile typeof(*&(*((struct hlist_nulls_node **)&(h)->first))) *)&(*&(*((struct hlist_nulls_node **)&(h)->first))) = ((typeof(*((typeof((*((struct hlist_nulls_node **)&(h)->first))))_r_a_p__v)) *)((typeof((*((struct hlist_nulls_node **)&(h)->first))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + if (!is_a_nulls(first)) + do { do { extern void __compiletime_assert_1444(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(first->pprev) == sizeof(char) || sizeof(first->pprev) == sizeof(short) || sizeof(first->pprev) == sizeof(int) || sizeof(first->pprev) == sizeof(long)) || sizeof(first->pprev) == sizeof(long long))) __compiletime_assert_1444(); } while (0); do { *(volatile typeof(first->pprev) *)&(first->pprev) = (&n->next); } while (0); } while (0); +} +# 130 "./include/linux/rculist_nulls.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *i, *last = ((void *)0); + + + for (i = h->first; !is_a_nulls(i); i = i->next) + last = i; + + if (last) { + n->next = last->next; + n->pprev = &last->next; + do { uintptr_t _r_a_p__v = (uintptr_t)(n); ; if (__builtin_constant_p(n) && (_r_a_p__v) == (uintptr_t)((void *)0)) do { do { extern void __compiletime_assert_1445(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if 
(!((sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(char) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(short) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(int) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(long)) || sizeof(((*((struct hlist_node **)(&(last)->next))))) == sizeof(long long))) __compiletime_assert_1445(); } while (0); do { *(volatile typeof(((*((struct hlist_node **)(&(last)->next))))) *)&(((*((struct hlist_node **)(&(last)->next))))) = ((typeof((*((struct hlist_node **)(&(last)->next)))))(_r_a_p__v)); } while (0); } while (0); else do { do { extern void __compiletime_assert_1446(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)))) __compiletime_assert_1446(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1447(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(char) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(short) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(int) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long)) || sizeof(*&(*((struct hlist_node **)(&(last)->next)))) == sizeof(long long))) __compiletime_assert_1447(); } while (0); do { *(volatile typeof(*&(*((struct hlist_node **)(&(last)->next)))) *)&(*&(*((struct hlist_node **)(&(last)->next)))) = ((typeof(*((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)) *)((typeof((*((struct hlist_node **)(&(last)->next)))))_r_a_p__v)); } while (0); } while (0); } while (0); } while (0); + } else { + hlist_nulls_add_head_rcu(n, h); + } +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hlist_nulls_add_fake(struct hlist_nulls_node *n) +{ + n->pprev = &n->next; + n->next = (struct hlist_nulls_node *)(1UL | (((long)((void *)0)) << 1)); +} +# 61 "./include/net/sock.h" 2 + + + + +# 1 "./include/net/dst.h" 1 +# 19 "./include/net/dst.h" +# 1 "./include/net/neighbour.h" 1 +# 41 "./include/net/neighbour.h" +struct neighbour; + +enum { + NEIGH_VAR_MCAST_PROBES, + NEIGH_VAR_UCAST_PROBES, + NEIGH_VAR_APP_PROBES, + NEIGH_VAR_MCAST_REPROBES, + NEIGH_VAR_RETRANS_TIME, + NEIGH_VAR_BASE_REACHABLE_TIME, + NEIGH_VAR_DELAY_PROBE_TIME, + NEIGH_VAR_GC_STALETIME, + NEIGH_VAR_QUEUE_LEN_BYTES, + NEIGH_VAR_PROXY_QLEN, + NEIGH_VAR_ANYCAST_DELAY, + NEIGH_VAR_PROXY_DELAY, + NEIGH_VAR_LOCKTIME, + + + NEIGH_VAR_QUEUE_LEN, + NEIGH_VAR_RETRANS_TIME_MS, + NEIGH_VAR_BASE_REACHABLE_TIME_MS, + + NEIGH_VAR_GC_INTERVAL, + NEIGH_VAR_GC_THRESH1, + NEIGH_VAR_GC_THRESH2, + NEIGH_VAR_GC_THRESH3, + NEIGH_VAR_MAX +}; + +struct neigh_parms { + possible_net_t net; + struct net_device *dev; + struct list_head list; + int (*neigh_setup)(struct neighbour *); + struct neigh_table *tbl; + + void *sysctl_table; + + int dead; + refcount_t refcnt; + struct callback_head callback_head; + + int reachable_time; + int data[(NEIGH_VAR_LOCKTIME + 1)]; + unsigned long data_state[((((NEIGH_VAR_LOCKTIME + 1)) + ((sizeof(long) * 8)) - 1) / ((sizeof(long) * 8)))]; +}; + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void neigh_var_set(struct neigh_parms *p, int index, int val) +{ + set_bit(index, p->data_state); + p->data[index] = val; +} +# 102 "./include/net/neighbour.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void neigh_parms_data_state_setall(struct neigh_parms *p) +{ + bitmap_fill(p->data_state, (NEIGH_VAR_LOCKTIME + 1)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void neigh_parms_data_state_cleanall(struct neigh_parms *p) +{ + bitmap_zero(p->data_state, (NEIGH_VAR_LOCKTIME + 1)); +} + +struct neigh_statistics { + unsigned long allocs; + unsigned long destroys; + unsigned long hash_grows; + + unsigned long res_failed; + + unsigned long lookups; + unsigned long hits; + + unsigned long rcv_probes_mcast; + unsigned long rcv_probes_ucast; + + unsigned long periodic_gc_runs; + unsigned long forced_gc_runs; + + unsigned long unres_discards; + unsigned long table_fulls; +}; + + + +struct neighbour { + struct neighbour *next; + struct neigh_table *tbl; + struct neigh_parms *parms; + unsigned long confirmed; + unsigned long updated; + rwlock_t lock; + refcount_t refcnt; + unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; + struct timer_list timer; + unsigned long used; + atomic_t probes; + __u8 flags; + __u8 nud_state; + __u8 type; + __u8 dead; + u8 protocol; + seqlock_t ha_lock; + unsigned char ha[((((32)) + ((typeof((32)))((sizeof(unsigned long))) - 1)) & ~((typeof((32)))((sizeof(unsigned long))) - 1))] __attribute__((__aligned__(8))); + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); + const struct neigh_ops *ops; + struct list_head gc_list; + struct callback_head rcu; + struct net_device *dev; + u8 primary_key[0]; +} __attribute__((__designated_init__)); + +struct neigh_ops { + int family; + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); +}; + +struct pneigh_entry { + struct pneigh_entry *next; + possible_net_t net; + struct net_device *dev; + u8 flags; + u8 protocol; + u8 key[]; +}; + + + + + + + +struct neigh_hash_table { + struct neighbour **hash_buckets; + unsigned int hash_shift; + __u32 hash_rnd[4]; + struct callback_head rcu; +}; + + +struct neigh_table { + int family; + unsigned int entry_size; + unsigned int key_len; + __be16 protocol; + __u32 (*hash)(const void *pkey, + const struct net_device *dev, + __u32 *hash_rnd); + bool (*key_eq)(const struct neighbour *, const void *pkey); + int (*constructor)(struct neighbour *); + int (*pconstructor)(struct pneigh_entry *); + void (*pdestructor)(struct pneigh_entry *); + void (*proxy_redo)(struct sk_buff *skb); + bool (*allow_add)(const struct net_device *dev, + struct netlink_ext_ack *extack); + char *id; + struct neigh_parms parms; + struct list_head parms_list; + int gc_interval; + int gc_thresh1; + int gc_thresh2; + int gc_thresh3; + unsigned long last_flush; + struct delayed_work gc_work; + struct timer_list proxy_timer; + struct sk_buff_head proxy_queue; + atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; + rwlock_t lock; + unsigned long last_rand; + struct neigh_statistics *stats; + struct neigh_hash_table *nht; + struct pneigh_entry 
**phash_buckets; +}; + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_DN_TABLE = 2, + NEIGH_NR_TABLES, + NEIGH_LINK_TABLE = NEIGH_NR_TABLES +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int neigh_parms_family(struct neigh_parms *p) +{ + return p->tbl->family; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void *neighbour_priv(const struct neighbour *n) +{ + return (char *)n + n->tbl->entry_size; +} +# 259 "./include/net/neighbour.h" +extern const struct nla_policy nda_policy[]; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool neigh_key_eq16(const struct neighbour *n, const void *pkey) +{ + return *(const u16 *)n->primary_key == *(const u16 *)pkey; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool neigh_key_eq32(const struct neighbour *n, const void *pkey) +{ + return *(const u32 *)n->primary_key == *(const u32 *)pkey; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool neigh_key_eq128(const struct neighbour *n, const void *pkey) +{ + const u32 *n32 = (const u32 *)n->primary_key; + const u32 *p32 = pkey; + + return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) | + (n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neighbour *___neigh_lookup_noref( + struct neigh_table *tbl, + bool (*key_eq)(const struct neighbour *n, const void *pkey), + __u32 (*hash)(const void *pkey, + const struct net_device *dev, + __u32 *hash_rnd), + const void *pkey, + struct net_device *dev) +{ + struct neigh_hash_table *nht = ({ typeof(*(tbl->nht)) *________p1 = (typeof(*(tbl->nht)) *)({ do { extern void __compiletime_assert_1448(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((tbl->nht)) == sizeof(char) || sizeof((tbl->nht)) == sizeof(short) || sizeof((tbl->nht)) == sizeof(int) || sizeof((tbl->nht)) == sizeof(long)) || sizeof((tbl->nht)) == sizeof(long long))) __compiletime_assert_1448(); } while (0); ({ typeof( _Generic(((tbl->nht)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->nht)))) __x = (*(const volatile typeof( _Generic(((tbl->nht)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((tbl->nht)))) *)&((tbl->nht))); do { } while (0); (typeof((tbl->nht)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_bh_held()))) { __warned = true; lockdep_rcu_suspicious("include/net/neighbour.h", 289, "suspicious rcu_dereference_check() usage"); } } while (0); ; 
((typeof(*(tbl->nht)) *)(________p1)); }); + struct neighbour *n; + u32 hash_val; + + hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); + for (n = ({ typeof(*(nht->hash_buckets[hash_val])) *________p1 = (typeof(*(nht->hash_buckets[hash_val])) *)({ do { extern void __compiletime_assert_1449(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((nht->hash_buckets[hash_val])) == sizeof(char) || sizeof((nht->hash_buckets[hash_val])) == sizeof(short) || sizeof((nht->hash_buckets[hash_val])) == sizeof(int) || sizeof((nht->hash_buckets[hash_val])) == sizeof(long)) || sizeof((nht->hash_buckets[hash_val])) == sizeof(long long))) __compiletime_assert_1449(); } while (0); ({ typeof( _Generic(((nht->hash_buckets[hash_val])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nht->hash_buckets[hash_val])))) __x = (*(const volatile typeof( _Generic(((nht->hash_buckets[hash_val])), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((nht->hash_buckets[hash_val])))) *)&((nht->hash_buckets[hash_val]))); do { } while (0); (typeof((nht->hash_buckets[hash_val])))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_bh_held()))) { __warned = true; lockdep_rcu_suspicious("include/net/neighbour.h", 294, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(nht->hash_buckets[hash_val])) *)(________p1)); }); + n != ((void *)0); + n = ({ typeof(*(n->next)) *________p1 = (typeof(*(n->next)) *)({ do { extern void __compiletime_assert_1450(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((n->next)) == sizeof(char) || sizeof((n->next)) == sizeof(short) || sizeof((n->next)) == sizeof(int) || sizeof((n->next)) == sizeof(long)) || sizeof((n->next)) == sizeof(long long))) __compiletime_assert_1450(); } while (0); ({ typeof( _Generic(((n->next)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((n->next)))) __x = (*(const volatile typeof( _Generic(((n->next)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((n->next)))) *)&((n->next))); do { } while (0); (typeof((n->next)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() 
&& !__warned && (!((0) || rcu_read_lock_bh_held()))) { __warned = true; lockdep_rcu_suspicious("include/net/neighbour.h", 296, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(n->next)) *)(________p1)); })) { + if (n->dev == dev && key_eq(n, pkey)) + return n; + } + + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl, + const void *pkey, + struct net_device *dev) +{ + return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev); +} + +void neigh_table_init(int index, struct neigh_table *tbl); +int neigh_table_clear(int index, struct neigh_table *tbl); +struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, + struct net_device *dev); +struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, + const void *pkey); +struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, + struct net_device *dev, bool want_ref); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neighbour *neigh_create(struct neigh_table *tbl, + const void *pkey, + struct net_device *dev) +{ + return __neigh_create(tbl, pkey, dev, true); +} +void neigh_destroy(struct neighbour *neigh); +int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb); +int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, + u32 nlmsg_pid); +void __neigh_set_probe_once(struct neighbour *neigh); +bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl); +void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); +int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); +int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev); +int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); +int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); +int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); +struct neighbour *neigh_event_ns(struct neigh_table *tbl, + u8 *lladdr, void *saddr, + struct net_device *dev); + +struct neigh_parms *neigh_parms_alloc(struct net_device *dev, + struct neigh_table *tbl); +void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct net *neigh_parms_net(const struct neigh_parms *parms) +{ + return read_pnet(&parms->net); +} + +unsigned long neigh_rand_reach_time(unsigned long base); + +void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, + struct sk_buff *skb); +struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, + const void *key, struct net_device *dev, + int creat); +struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net, + const void *key, struct net_device *dev); +int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, + struct net_device *dev); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct net *pneigh_net(const struct pneigh_entry *pneigh) +{ + return read_pnet(&pneigh->net); +} + +void neigh_app_ns(struct neighbour *n); +void neigh_for_each(struct neigh_table *tbl, + void (*cb)(struct neighbour *, void *), void *cookie); +void __neigh_for_each_release(struct neigh_table *tbl, + 
int (*cb)(struct neighbour *)); +int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *); +void pneigh_for_each(struct neigh_table *tbl, + void (*cb)(struct pneigh_entry *)); + +struct neigh_seq_state { + struct seq_net_private p; + struct neigh_table *tbl; + struct neigh_hash_table *nht; + void *(*neigh_sub_iter)(struct neigh_seq_state *state, + struct neighbour *n, loff_t *pos); + unsigned int bucket; + unsigned int flags; + + + +}; +void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, + unsigned int); +void *neigh_seq_next(struct seq_file *, void *, loff_t *); +void neigh_seq_stop(struct seq_file *, void *); + +int neigh_proc_dointvec(struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, + void *buffer, + size_t *lenp, loff_t *ppos); +int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos); + +int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, + proc_handler *proc_handler); +void neigh_sysctl_unregister(struct neigh_parms *p); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __neigh_parms_put(struct neigh_parms *parms) +{ + refcount_dec(&parms->refcnt); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms) +{ + refcount_inc(&parms->refcnt); + return parms; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void neigh_release(struct neighbour *neigh) +{ + if (refcount_dec_and_test(&neigh->refcnt)) + neigh_destroy(neigh); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neighbour * neigh_clone(struct neighbour *neigh) +{ + if (neigh) + refcount_inc(&neigh->refcnt); + return neigh; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) +{ + unsigned long now = jiffies; + + if (({ do { extern void __compiletime_assert_1451(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(neigh->used) == sizeof(char) || sizeof(neigh->used) == sizeof(short) || sizeof(neigh->used) == sizeof(int) || sizeof(neigh->used) == sizeof(long)) || sizeof(neigh->used) == sizeof(long long))) __compiletime_assert_1451(); } while (0); ({ typeof( _Generic((neigh->used), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (neigh->used))) __x = (*(const volatile typeof( _Generic((neigh->used), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (neigh->used))) *)&(neigh->used)); do { } while 
(0); (typeof(neigh->used))__x; }); }) != now) + do { do { extern void __compiletime_assert_1452(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(neigh->used) == sizeof(char) || sizeof(neigh->used) == sizeof(short) || sizeof(neigh->used) == sizeof(int) || sizeof(neigh->used) == sizeof(long)) || sizeof(neigh->used) == sizeof(long long))) __compiletime_assert_1452(); } while (0); do { *(volatile typeof(neigh->used) *)&(neigh->used) = (now); } while (0); } while (0); + if (!(neigh->nud_state&((0x80|0x40|0x02)|0x08|0x10))) + return __neigh_event_send(neigh, skb); + return 0; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) +{ + unsigned int seq, hh_alen; + + do { + seq = read_seqbegin(&hh->hh_lock); + hh_alen = (((14)+(16 -1))&~(16 - 1)); + memcpy(skb->data - hh_alen, hh->hh_data, 6 + hh_alen - 14); + } while (read_seqretry(&hh->hh_lock, seq)); + return 0; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) +{ + unsigned int hh_alen = 0; + unsigned int seq; + unsigned int hh_len; + + do { + seq = read_seqbegin(&hh->hh_lock); + hh_len = ({ do { extern void __compiletime_assert_1453(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(hh->hh_len) == sizeof(char) || sizeof(hh->hh_len) == sizeof(short) || sizeof(hh->hh_len) == sizeof(int) || sizeof(hh->hh_len) == sizeof(long)) || sizeof(hh->hh_len) == sizeof(long long))) __compiletime_assert_1453(); } while (0); ({ typeof( _Generic((hh->hh_len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (hh->hh_len))) __x = (*(const volatile typeof( _Generic((hh->hh_len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (hh->hh_len))) *)&(hh->hh_len)); do { } while (0); (typeof(hh->hh_len))__x; }); }); + if (__builtin_expect(!!(hh_len <= 16), 1)) { + hh_alen = 16; + + + + + + if (__builtin_expect(!!(skb_headroom(skb) >= 16), 1)) { + + memcpy(skb->data - 16, hh->hh_data, + 16); + } + } else { + hh_alen = (((hh_len)+(16 -1))&~(16 - 1)); + + if (__builtin_expect(!!(skb_headroom(skb) >= hh_alen), 1)) { + memcpy(skb->data - hh_alen, hh->hh_data, + hh_alen); + } + } + } while (read_seqretry(&hh->hh_lock, seq)); + + if (({ int __ret_warn_on = !!(skb_headroom(skb) < hh_alen); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1454)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# 
bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/neighbour.h"), "i" (492), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1455)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1456)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); })) { + kfree_skb(skb); + return 0x01; + } + + __skb_push(skb, hh_len); + return dev_queue_xmit(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int neigh_output(struct neighbour *n, struct sk_buff *skb, + bool skip_cache) +{ + const struct hh_cache *hh = &n->hh; + + if ((n->nud_state & (0x80|0x40|0x02)) && hh->hh_len && !skip_cache) + return neigh_hh_output(hh, skb); + else + return n->output(n, skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neighbour * +__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat) +{ + struct neighbour *n = neigh_lookup(tbl, pkey, dev); + + if (n || !creat) + return n; + + n = neigh_create(tbl, pkey, dev); + return IS_ERR(n) ? ((void *)0) : n; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neighbour * +__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey, + struct net_device *dev) +{ + struct neighbour *n = neigh_lookup(tbl, pkey, dev); + + if (n) + return n; + + return neigh_create(tbl, pkey, dev); +} + +struct neighbour_cb { + unsigned long sched_next; + unsigned int flags; +}; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void neigh_ha_snapshot(char *dst, const struct neighbour *n, + const struct net_device *dev) +{ + unsigned int seq; + + do { + seq = read_seqbegin(&n->ha_lock); + memcpy(dst, n->ha, dev->addr_len); + } while (read_seqretry(&n->ha_lock, seq)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void neigh_update_is_router(struct neighbour *neigh, u32 flags, + int *notify) +{ + u8 ndm_flags = 0; + + ndm_flags |= (flags & 0x40000000) ? 
0x80 : 0; + if ((neigh->flags ^ ndm_flags) & 0x80) { + if (ndm_flags & 0x80) + neigh->flags |= 0x80; + else + neigh->flags &= ~0x80; + *notify = 1; + } +} +# 20 "./include/net/dst.h" 2 + + +struct sk_buff; + +struct dst_entry { + struct net_device *dev; + struct dst_ops *ops; + unsigned long _metrics; + unsigned long expires; + + struct xfrm_state *xfrm; + + + + int (*input)(struct sk_buff *); + int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); + + unsigned short flags; +# 54 "./include/net/dst.h" + short obsolete; + + + + + unsigned short header_len; + unsigned short trailer_len; + + + + + + + atomic_t __refcnt; + + int __use; + unsigned long lastuse; + struct lwtunnel_state *lwtstate; + struct callback_head callback_head; + short error; + short __pad; + __u32 tclassid; + + + +}; + +struct dst_metrics { + u32 metrics[(__RTAX_MAX - 1)]; + refcount_t refcnt; +} __attribute__((__aligned__(4))); +extern const struct dst_metrics dst_default_metrics; + +u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); +# 96 "./include/net/dst.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dst_metrics_read_only(const struct dst_entry *dst) +{ + return dst->_metrics & 0x1UL; +} + +void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_destroy_metrics_generic(struct dst_entry *dst) +{ + unsigned long val = dst->_metrics; + if (!(val & 0x1UL)) + __dst_destroy_metrics_generic(dst, val); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 *dst_metrics_write_ptr(struct dst_entry *dst) +{ + unsigned long p = dst->_metrics; + + do { if (__builtin_expect(!!(!p), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1457)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/dst.h"), "i" (114), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1458)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + + if (p & 0x1UL) + return dst->ops->cow_metrics(dst, p); + return ((u32 *)((p) & ~0x3UL)); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_init_metrics(struct dst_entry *dst, + const u32 *src_metrics, + bool read_only) +{ + dst->_metrics = ((unsigned long) src_metrics) | + (read_only ? 
0x1UL : 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) +{ + u32 *dst_metrics = dst_metrics_write_ptr(dest); + + if (dst_metrics) { + u32 *src_metrics = ((u32 *)(((src)->_metrics) & ~0x3UL)); + + memcpy(dst_metrics, src_metrics, (__RTAX_MAX - 1) * sizeof(u32)); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 *dst_metrics_ptr(struct dst_entry *dst) +{ + return ((u32 *)(((dst)->_metrics) & ~0x3UL)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 +dst_metric_raw(const struct dst_entry *dst, const int metric) +{ + u32 *p = ((u32 *)(((dst)->_metrics) & ~0x3UL)); + + return p[metric-1]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 +dst_metric(const struct dst_entry *dst, const int metric) +{ + ({ int __ret_warn_on = !!(metric == RTAX_HOPLIMIT || metric == RTAX_ADVMSS || metric == RTAX_MTU); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1459)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/dst.h"), "i" (159), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1460)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1461)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }) + + ; + return dst_metric_raw(dst, metric); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 +dst_metric_advmss(const struct dst_entry *dst) +{ + u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS); + + if (!advmss) + advmss = dst->ops->default_advmss(dst); + + return advmss; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_metric_set(struct dst_entry *dst, int metric, u32 val) +{ + u32 *p = dst_metrics_write_ptr(dst); + + if (p) + p[metric-1] = val; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 +dst_feature(const struct dst_entry *dst, u32 feature) +{ + return dst_metric(dst, RTAX_FEATURES) & feature; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 dst_mtu(const struct dst_entry *dst) +{ + return dst->ops->mtu(dst); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric) +{ + return msecs_to_jiffies(dst_metric(dst, metric)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 +dst_allfrag(const struct dst_entry *dst) 
+{ + int ret = dst_feature(dst, (1 << 3)); + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int +dst_metric_locked(const struct dst_entry *dst, int metric) +{ + return dst_metric(dst, RTAX_LOCK) & (1<<metric); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_hold(struct dst_entry *dst) +{ + + + + + do { extern void __compiletime_assert_1462(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct dst_entry, __refcnt) & 63"))); if (!(!(__builtin_offsetof(struct dst_entry, __refcnt) & 63))) __compiletime_assert_1462(); } while (0); + ({ int __ret_warn_on = !!(atomic_inc_not_zero(&dst->__refcnt) == 0); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1463)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/dst.h"), "i" (227), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1464)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1465)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_use_noref(struct dst_entry *dst, unsigned long time) +{ + if (__builtin_expect(!!(time != dst->lastuse), 0)) { + dst->__use++; + dst->lastuse = time; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_hold_and_use(struct dst_entry *dst, unsigned long time) +{ + dst_hold(dst); + dst_use_noref(dst, time); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dst_entry *dst_clone(struct dst_entry *dst) +{ + if (dst) + dst_hold(dst); + return dst; +} + +void dst_release(struct dst_entry *dst); + +void dst_release_immediate(struct dst_entry *dst); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void refdst_drop(unsigned long refdst) +{ + if (!(refdst & 1UL)) + dst_release((struct dst_entry *)(refdst & ~(1UL))); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_dst_drop(struct sk_buff *skb) +{ + if (skb->_skb_refdst) { + refdst_drop(skb->_skb_refdst); + skb->_skb_refdst = 0UL; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst) +{ + nskb->_skb_refdst = refdst; + if (!(nskb->_skb_refdst & 1UL)) + dst_clone(skb_dst(nskb)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb) +{ + __skb_dst_copy(nskb, oskb->_skb_refdst); +} +# 294 "./include/net/dst.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dst_hold_safe(struct dst_entry *dst) +{ + return atomic_inc_not_zero(&dst->__refcnt); +} +# 306 "./include/net/dst.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool skb_dst_force(struct sk_buff *skb) +{ + if (skb_dst_is_noref(skb)) { + struct dst_entry *dst = skb_dst(skb); +
+ ({ int __ret_warn_on = !!(!rcu_read_lock_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1466)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/dst.h"), "i" (311), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1467)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1468)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + if (!dst_hold_safe(dst)) + dst = ((void *)0); + + skb->_skb_refdst = (unsigned long)dst; + } + + return skb->_skb_refdst != 0UL; +} +# 331 "./include/net/dst.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, + struct net *net) +{ + skb->dev = dev; + + + + + + + skb_clear_hash_if_not_l4(skb); + skb_set_queue_mapping(skb, 0); + skb_scrub_packet(skb, !net_eq(net, dev_net(dev))); +} +# 356 "./include/net/dst.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, + struct net *net) +{ + + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + __skb_tunnel_rx(skb, dev, net); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 dst_tclassid(const struct sk_buff *skb) +{ + + const struct dst_entry *dst; + + dst = skb_dst(skb); + if (dst) + return dst->tclassid; + + return 0; +} + +int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dst_discard(struct sk_buff *skb) +{ + return dst_discard_out(&init_net, skb->sk, skb); +} +void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, + int initial_obsolete, unsigned short flags); +void dst_init(struct dst_entry *dst, struct dst_ops *ops, + struct net_device *dev, int initial_ref, int initial_obsolete, + unsigned short flags); +struct dst_entry *dst_destroy(struct dst_entry *dst); +void dst_dev_put(struct dst_entry *dst); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_confirm(struct dst_entry *dst) +{ +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) +{ + struct neighbour *n = dst->ops->neigh_lookup(dst, ((void *)0), daddr); + return IS_ERR(n) ? ((void *)0) : n; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, + struct sk_buff *skb) +{ + struct neighbour *n = dst->ops->neigh_lookup(dst, skb, ((void *)0)); + return IS_ERR(n) ? 
((void *)0) : n; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_confirm_neigh(const struct dst_entry *dst, + const void *daddr) +{ + if (dst->ops->confirm_neigh) + dst->ops->confirm_neigh(dst, daddr); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_link_failure(struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + if (dst && dst->ops && dst->ops->link_failure) + dst->ops->link_failure(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void dst_set_expires(struct dst_entry *dst, int timeout) +{ + unsigned long expires = jiffies + timeout; + + if (expires == 0) + expires = 1; + + if (dst->expires == 0 || (({ unsigned long __dummy; typeof(dst->expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)((expires) - (dst->expires)) < 0))) + dst->expires = expires; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + return skb_dst(skb)->output(net, sk, skb); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int dst_input(struct sk_buff *skb) +{ + return skb_dst(skb)->input(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) +{ + if (dst->obsolete) + dst = dst->ops->check(dst, cookie); + return dst; +} + + +enum { + XFRM_LOOKUP_ICMP = 1 << 0, + XFRM_LOOKUP_QUEUE = 1 << 1, + XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, +}; + +struct flowi; +# 492 "./include/net/dst.h" +struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, + const struct flowi *fl, const struct sock *sk, + int flags); + +struct dst_entry *xfrm_lookup_with_ifid(struct net *net, + struct dst_entry *dst_orig, + const struct flowi *fl, + const struct sock *sk, int flags, + u32 if_id); + +struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, + const struct flowi *fl, const struct sock *sk, + int flags); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct xfrm_state *dst_xfrm(const struct dst_entry *dst) +{ + return dst->xfrm; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu) +{ + struct dst_entry *dst = skb_dst(skb); + + if (dst && dst->ops->update_pmtu) + dst->ops->update_pmtu(dst, ((void *)0), skb, mtu, true); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) +{ + struct dst_entry *dst = skb_dst(skb); + + if (dst && dst->ops->update_pmtu) + dst->ops->update_pmtu(dst, ((void *)0), skb, mtu, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_tunnel_check_pmtu(struct sk_buff *skb, + struct dst_entry *encap_dst, + int headroom) +{ + u32 encap_mtu = dst_mtu(encap_dst); + + if (skb->len > 
encap_mtu - headroom) + skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom); +} +# 66 "./include/net/sock.h" 2 + +# 1 "./include/net/tcp_states.h" 1 +# 12 "./include/net/tcp_states.h" +enum { + TCP_ESTABLISHED = 1, + TCP_SYN_SENT, + TCP_SYN_RECV, + TCP_FIN_WAIT1, + TCP_FIN_WAIT2, + TCP_TIME_WAIT, + TCP_CLOSE, + TCP_CLOSE_WAIT, + TCP_LAST_ACK, + TCP_LISTEN, + TCP_CLOSING, + TCP_NEW_SYN_RECV, + + TCP_MAX_STATES +}; + + + + + +enum { + TCPF_ESTABLISHED = (1 << TCP_ESTABLISHED), + TCPF_SYN_SENT = (1 << TCP_SYN_SENT), + TCPF_SYN_RECV = (1 << TCP_SYN_RECV), + TCPF_FIN_WAIT1 = (1 << TCP_FIN_WAIT1), + TCPF_FIN_WAIT2 = (1 << TCP_FIN_WAIT2), + TCPF_TIME_WAIT = (1 << TCP_TIME_WAIT), + TCPF_CLOSE = (1 << TCP_CLOSE), + TCPF_CLOSE_WAIT = (1 << TCP_CLOSE_WAIT), + TCPF_LAST_ACK = (1 << TCP_LAST_ACK), + TCPF_LISTEN = (1 << TCP_LISTEN), + TCPF_CLOSING = (1 << TCP_CLOSING), + TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV), +}; +# 68 "./include/net/sock.h" 2 +# 1 "./include/uapi/linux/net_tstamp.h" 1 +# 17 "./include/uapi/linux/net_tstamp.h" +enum { + SOF_TIMESTAMPING_TX_HARDWARE = (1<<0), + SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1), + SOF_TIMESTAMPING_RX_HARDWARE = (1<<2), + SOF_TIMESTAMPING_RX_SOFTWARE = (1<<3), + SOF_TIMESTAMPING_SOFTWARE = (1<<4), + SOF_TIMESTAMPING_SYS_HARDWARE = (1<<5), + SOF_TIMESTAMPING_RAW_HARDWARE = (1<<6), + SOF_TIMESTAMPING_OPT_ID = (1<<7), + SOF_TIMESTAMPING_TX_SCHED = (1<<8), + SOF_TIMESTAMPING_TX_ACK = (1<<9), + SOF_TIMESTAMPING_OPT_CMSG = (1<<10), + SOF_TIMESTAMPING_OPT_TSONLY = (1<<11), + SOF_TIMESTAMPING_OPT_STATS = (1<<12), + SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13), + SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14), + + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW, + SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | + SOF_TIMESTAMPING_LAST +}; +# 62 "./include/uapi/linux/net_tstamp.h" +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + + +enum hwtstamp_tx_types { + + + + + + HWTSTAMP_TX_OFF, + + + + + + + + HWTSTAMP_TX_ON, +# 92 "./include/uapi/linux/net_tstamp.h" + HWTSTAMP_TX_ONESTEP_SYNC, + + + + + + + + HWTSTAMP_TX_ONESTEP_P2P, + + + __HWTSTAMP_TX_CNT +}; + + +enum hwtstamp_rx_filters { + + HWTSTAMP_FILTER_NONE, + + + HWTSTAMP_FILTER_ALL, + + + HWTSTAMP_FILTER_SOME, + + + HWTSTAMP_FILTER_PTP_V1_L4_EVENT, + + HWTSTAMP_FILTER_PTP_V1_L4_SYNC, + + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ, + + HWTSTAMP_FILTER_PTP_V2_L4_EVENT, + + HWTSTAMP_FILTER_PTP_V2_L4_SYNC, + + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ, + + + HWTSTAMP_FILTER_PTP_V2_L2_EVENT, + + HWTSTAMP_FILTER_PTP_V2_L2_SYNC, + + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ, + + + HWTSTAMP_FILTER_PTP_V2_EVENT, + + HWTSTAMP_FILTER_PTP_V2_SYNC, + + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ, + + + HWTSTAMP_FILTER_NTP_ALL, + + + __HWTSTAMP_FILTER_CNT +}; + + +struct scm_ts_pktinfo { + __u32 if_index; + __u32 pkt_length; + __u32 reserved[2]; +}; + + + + + +enum txtime_flags { + SOF_TXTIME_DEADLINE_MODE = (1 << 0), + SOF_TXTIME_REPORT_ERRORS = (1 << 1), + + SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS, + SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) | + SOF_TXTIME_FLAGS_LAST +}; + +struct sock_txtime { + __kernel_clockid_t clockid; + __u32 flags; +}; +# 69 "./include/net/sock.h" 2 +# 1 "./include/net/l3mdev.h" 1 +# 11 "./include/net/l3mdev.h" +# 1 "./include/net/fib_rules.h" 1 + + + + + + + +# 1 "./include/uapi/linux/fib_rules.h" 1 +# 19 "./include/uapi/linux/fib_rules.h" +struct fib_rule_hdr { + __u8 family; + __u8 dst_len; + __u8 src_len; + __u8 tos; + + __u8 table; + __u8 res1; + __u8 res2; + __u8 action; + + __u32 
flags; +}; + +struct fib_rule_uid_range { + __u32 start; + __u32 end; +}; + +struct fib_rule_port_range { + __u16 start; + __u16 end; +}; + +enum { + FRA_UNSPEC, + FRA_DST, + FRA_SRC, + FRA_IIFNAME, + + FRA_GOTO, + FRA_UNUSED2, + FRA_PRIORITY, + FRA_UNUSED3, + FRA_UNUSED4, + FRA_UNUSED5, + FRA_FWMARK, + FRA_FLOW, + FRA_TUN_ID, + FRA_SUPPRESS_IFGROUP, + FRA_SUPPRESS_PREFIXLEN, + FRA_TABLE, + FRA_FWMASK, + FRA_OIFNAME, + FRA_PAD, + FRA_L3MDEV, + FRA_UID_RANGE, + FRA_PROTOCOL, + FRA_IP_PROTO, + FRA_SPORT_RANGE, + FRA_DPORT_RANGE, + __FRA_MAX +}; + + + +enum { + FR_ACT_UNSPEC, + FR_ACT_TO_TBL, + FR_ACT_GOTO, + FR_ACT_NOP, + FR_ACT_RES3, + FR_ACT_RES4, + FR_ACT_BLACKHOLE, + FR_ACT_UNREACHABLE, + FR_ACT_PROHIBIT, + __FR_ACT_MAX, +}; +# 9 "./include/net/fib_rules.h" 2 + + + +# 1 "./include/net/fib_notifier.h" 1 + + + + + + + +struct module; + +struct fib_notifier_info { + int family; + struct netlink_ext_ack *extack; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE, + FIB_EVENT_ENTRY_APPEND, + FIB_EVENT_ENTRY_ADD, + FIB_EVENT_ENTRY_DEL, + FIB_EVENT_RULE_ADD, + FIB_EVENT_RULE_DEL, + FIB_EVENT_NH_ADD, + FIB_EVENT_NH_DEL, + FIB_EVENT_VIF_ADD, + FIB_EVENT_VIF_DEL, +}; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(struct net *net); + int (*fib_dump)(struct net *net, struct notifier_block *nb, + struct netlink_ext_ack *extack); + struct module *owner; + struct callback_head rcu; +}; + +int call_fib_notifier(struct notifier_block *nb, + enum fib_event_type event_type, + struct fib_notifier_info *info); +int call_fib_notifiers(struct net *net, enum fib_event_type event_type, + struct fib_notifier_info *info); +int register_fib_notifier(struct net *net, struct notifier_block *nb, + void (*cb)(struct notifier_block *nb), + struct netlink_ext_ack *extack); +int unregister_fib_notifier(struct net *net, struct notifier_block *nb); +struct fib_notifier_ops * +fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net); +void fib_notifier_ops_unregister(struct fib_notifier_ops *ops); +# 13 "./include/net/fib_rules.h" 2 + +struct fib_kuid_range { + kuid_t start; + kuid_t end; +}; + +struct fib_rule { + struct list_head list; + int iifindex; + int oifindex; + u32 mark; + u32 mark_mask; + u32 flags; + u32 table; + u8 action; + u8 l3mdev; + u8 proto; + u8 ip_proto; + u32 target; + __be64 tun_id; + struct fib_rule *ctarget; + struct net *fr_net; + + refcount_t refcnt; + u32 pref; + int suppress_ifgroup; + int suppress_prefixlen; + char iifname[16]; + char oifname[16]; + struct fib_kuid_range uid_range; + struct fib_rule_port_range sport_range; + struct fib_rule_port_range dport_range; + struct callback_head rcu; +}; + +struct fib_lookup_arg { + void *lookup_ptr; + const void *lookup_data; + void *result; + struct fib_rule *rule; + u32 table; + int flags; + + +}; + +struct fib_rules_ops { + int family; + struct list_head list; + int rule_size; + int addr_size; + int unresolved_rules; + int nr_goto_rules; + unsigned int fib_rules_seq; + + int (*action)(struct fib_rule *, + struct flowi *, int, + struct fib_lookup_arg *); + bool (*suppress)(struct fib_rule *, + struct fib_lookup_arg *); + int (*match)(struct fib_rule *, + struct flowi *, int); + int (*configure)(struct fib_rule *, + struct sk_buff *, + struct fib_rule_hdr *, + struct nlattr **, + struct netlink_ext_ack *); + int (*delete)(struct fib_rule *); + int (*compare)(struct fib_rule *, + struct fib_rule_hdr *, + struct nlattr **); + int (*fill)(struct fib_rule *, struct sk_buff *, + 
struct fib_rule_hdr *); + size_t (*nlmsg_payload)(struct fib_rule *); + + + + void (*flush_cache)(struct fib_rules_ops *ops); + + int nlgroup; + const struct nla_policy *policy; + struct list_head rules_list; + struct module *owner; + struct net *fro_net; + struct callback_head rcu; +}; + +struct fib_rule_notifier_info { + struct fib_notifier_info info; + struct fib_rule *rule; +}; +# 125 "./include/net/fib_rules.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fib_rule_get(struct fib_rule *rule) +{ + refcount_inc(&rule->refcnt); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fib_rule_put(struct fib_rule *rule) +{ + if (refcount_dec_and_test(&rule->refcnt)) + do { typeof (rule) ___p = (rule); if (___p) do { do { extern void __compiletime_assert_1469(void) __attribute__((__error__("BUILD_BUG_ON failed: " "!__is_kfree_rcu_offset(__builtin_offsetof(typeof(*(rule)), rcu))"))); if (!(!(!((__builtin_offsetof(typeof(*(rule)), rcu)) < 4096)))) __compiletime_assert_1469(); } while (0); kfree_call_rcu(&((___p)->rcu), (rcu_callback_t)(unsigned long)(__builtin_offsetof(typeof(*(rule)), rcu))); } while (0); } while (0); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 fib_rule_get_table(struct fib_rule *rule, + struct fib_lookup_arg *arg) +{ + return rule->l3mdev ? arg->table : rule->table; +} +# 150 "./include/net/fib_rules.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla) +{ + if (nla[FRA_TABLE]) + return nla_get_u32(nla[FRA_TABLE]); + return frh->table; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fib_rule_port_range_set(const struct fib_rule_port_range *range) +{ + return range->start != 0 && range->end != 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fib_rule_port_inrange(const struct fib_rule_port_range *a, + __be16 port) +{ + return (__u16)__builtin_bswap16((__u16)(( __u16)(__be16)(port))) >= a->start && + (__u16)__builtin_bswap16((__u16)(( __u16)(__be16)(port))) <= a->end; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fib_rule_port_range_valid(const struct fib_rule_port_range *a) +{ + return a->start != 0 && a->end != 0 && a->end < 0xffff && + a->start <= a->end; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fib_rule_port_range_compare(struct fib_rule_port_range *a, + struct fib_rule_port_range *b) +{ + return a->start == b->start && + a->end == b->end; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fib_rule_requires_fldissect(struct fib_rule *rule) +{ + return rule->iifindex != 1 && (rule->ip_proto || + fib_rule_port_range_set(&rule->sport_range) || + fib_rule_port_range_set(&rule->dport_range)); +} + +struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, + struct net *); +void fib_rules_unregister(struct fib_rules_ops *); + +int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags, + struct fib_lookup_arg *); +int 
fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, + u32 flags); +bool fib_rule_matchall(const struct fib_rule *rule); +int fib_rules_dump(struct net *net, struct notifier_block *nb, int family, + struct netlink_ext_ack *extack); +unsigned int fib_rules_seq_read(struct net *net, int family); + +int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack); +int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack); +# 12 "./include/net/l3mdev.h" 2 +# 25 "./include/net/l3mdev.h" +struct l3mdev_ops { + u32 (*l3mdev_fib_table)(const struct net_device *dev); + struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev, + struct sk_buff *skb, u16 proto); + struct sk_buff * (*l3mdev_l3_out)(struct net_device *dev, + struct sock *sk, struct sk_buff *skb, + u16 proto); + + + struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev, + struct flowi6 *fl6); +}; + + + +int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, + struct fib_lookup_arg *arg); + +void l3mdev_update_flow(struct net *net, struct flowi *fl); + +int l3mdev_master_ifindex_rcu(const struct net_device *dev); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int l3mdev_master_ifindex(struct net_device *dev) +{ + int ifindex; + + rcu_read_lock(); + ifindex = l3mdev_master_ifindex_rcu(dev); + rcu_read_unlock(); + + return ifindex; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int l3mdev_master_ifindex_by_index(struct net *net, int ifindex) +{ + struct net_device *dev; + int rc = 0; + + if (__builtin_expect(!!(ifindex), 1)) { + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (dev) + rc = l3mdev_master_ifindex_rcu(dev); + + rcu_read_unlock(); + } + + return rc; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev) +{ + + + + + + + struct net_device *dev = (struct net_device *)_dev; + struct net_device *master; + + if (!dev) + return ((void *)0); + + if (netif_is_l3_master(dev)) + master = dev; + else if (netif_is_l3_slave(dev)) + master = netdev_master_upper_dev_get_rcu(dev); + else + master = ((void *)0); + + return master; +} + +int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex) +{ + rcu_read_lock(); + ifindex = l3mdev_master_upper_ifindex_by_index_rcu(net, ifindex); + rcu_read_unlock(); + + return ifindex; +} + +u32 l3mdev_fib_table_rcu(const struct net_device *dev); +u32 l3mdev_fib_table_by_index(struct net *net, int ifindex); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u32 l3mdev_fib_table(const struct net_device *dev) +{ + u32 tb_id; + + rcu_read_lock(); + tb_id = l3mdev_fib_table_rcu(dev); + rcu_read_unlock(); + + return tb_id; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool netif_index_is_l3_master(struct net *net, int ifindex) +{ + struct net_device *dev; + bool rc = false; + + if (ifindex == 0) + return false; + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, 
ifindex); + if (dev) + rc = netif_is_l3_master(dev); + + rcu_read_unlock(); + + return rc; +} + +struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) +{ + struct net_device *master = ((void *)0); + + if (netif_is_l3_slave(skb->dev)) + master = netdev_master_upper_dev_get_rcu(skb->dev); + else if (netif_is_l3_master(skb->dev) || + netif_has_l3_rx_handler(skb->dev)) + master = skb->dev; + + if (master && master->l3mdev_ops->l3mdev_l3_rcv) + skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto); + + return skb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb) +{ + return l3mdev_l3_rcv(skb, 2); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb) +{ + return l3mdev_l3_rcv(skb, 10); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto) +{ + struct net_device *dev = skb_dst(skb)->dev; + + if (netif_is_l3_slave(dev)) { + struct net_device *master; + + master = netdev_master_upper_dev_get_rcu(dev); + if (master && master->l3mdev_ops->l3mdev_l3_out) + skb = master->l3mdev_ops->l3mdev_l3_out(master, sk, + skb, proto); + } + + return skb; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb) +{ + return l3mdev_l3_out(sk, skb, 2); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb) +{ + return l3mdev_l3_out(sk, skb, 10); +} +# 70 "./include/net/sock.h" 2 +# 94 "./include/net/sock.h" +typedef struct { + spinlock_t slock; + int owned; + wait_queue_head_t wq; + + + + + + + + struct lockdep_map dep_map; + +} socket_lock_t; + +struct sock; +struct proto; +struct net; + +typedef __u32 __portpair; +typedef __u64 __addrpair; +# 161 "./include/net/sock.h" +struct sock_common { + + + + union { + __addrpair skc_addrpair; + struct { + __be32 skc_daddr; + __be32 skc_rcv_saddr; + }; + }; + union { + unsigned int skc_hash; + __u16 skc_u16hashes[2]; + }; + + union { + __portpair skc_portpair; + struct { + __be16 skc_dport; + __u16 skc_num; + }; + }; + + unsigned short skc_family; + volatile unsigned char skc_state; + unsigned char skc_reuse:4; + unsigned char skc_reuseport:1; + unsigned char skc_ipv6only:1; + unsigned char skc_net_refcnt:1; + int skc_bound_dev_if; + union { + struct hlist_node skc_bind_node; + struct hlist_node skc_portaddr_node; + }; + struct proto *skc_prot; + possible_net_t skc_net; + + + struct in6_addr skc_v6_daddr; + struct in6_addr skc_v6_rcv_saddr; + + + atomic64_t skc_cookie; + + + + + + + union { + unsigned long skc_flags; + struct sock *skc_listener; + struct inet_timewait_death_row *skc_tw_dr; + }; + + + + + + int skc_dontcopy_begin[0]; + + union { + struct hlist_node skc_node; + struct hlist_nulls_node skc_nulls_node; + }; + unsigned short skc_tx_queue_mapping; + + unsigned short skc_rx_queue_mapping; + + union { + int 
skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; + }; + + refcount_t skc_refcnt; + + int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; + }; + +}; + +struct bpf_sk_storage; +# 346 "./include/net/sock.h" +struct sock { + + + + + struct sock_common __sk_common; +# 386 "./include/net/sock.h" + socket_lock_t sk_lock; + atomic_t sk_drops; + int sk_rcvlowat; + struct sk_buff_head sk_error_queue; + struct sk_buff *sk_rx_skb_cache; + struct sk_buff_head sk_receive_queue; +# 400 "./include/net/sock.h" + struct { + atomic_t rmem_alloc; + int len; + struct sk_buff *head; + struct sk_buff *tail; + } sk_backlog; + + + int sk_forward_alloc; + + unsigned int sk_ll_usec; + + unsigned int sk_napi_id; + + int sk_rcvbuf; + + struct sk_filter *sk_filter; + union { + struct socket_wq *sk_wq; + + struct socket_wq *sk_wq_raw; + + }; + + struct xfrm_policy *sk_policy[2]; + + struct dst_entry *sk_rx_dst; + struct dst_entry *sk_dst_cache; + atomic_t sk_omem_alloc; + int sk_sndbuf; + + + int sk_wmem_queued; + refcount_t sk_wmem_alloc; + unsigned long sk_tsq_flags; + union { + struct sk_buff *sk_send_head; + struct rb_root tcp_rtx_queue; + }; + struct sk_buff *sk_tx_skb_cache; + struct sk_buff_head sk_write_queue; + __s32 sk_peek_off; + int sk_write_pending; + __u32 sk_dst_pending_confirm; + u32 sk_pacing_status; + long sk_sndtimeo; + struct timer_list sk_timer; + __u32 sk_priority; + __u32 sk_mark; + unsigned long sk_pacing_rate; + unsigned long sk_max_pacing_rate; + struct page_frag sk_frag; + netdev_features_t sk_route_caps; + netdev_features_t sk_route_nocaps; + netdev_features_t sk_route_forced_caps; + int sk_gso_type; + unsigned int sk_gso_max_size; + gfp_t sk_allocation; + __u32 sk_txhash; + + + + + + u8 sk_padding : 1, + sk_kern_sock : 1, + sk_no_check_tx : 1, + sk_no_check_rx : 1, + sk_userlocks : 4; + u8 sk_pacing_shift; + u16 sk_type; + u16 sk_protocol; + u16 sk_gso_max_segs; + unsigned long sk_lingertime; + struct proto *sk_prot_creator; + rwlock_t sk_callback_lock; + int sk_err, + sk_err_soft; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; + kuid_t sk_uid; + struct pid *sk_peer_pid; + const struct cred *sk_peer_cred; + long sk_rcvtimeo; + ktime_t sk_stamp; + + + + u16 sk_tsflags; + u8 sk_shutdown; + u32 sk_tskey; + atomic_t sk_zckey; + + u8 sk_clockid; + u8 sk_txtime_deadline_mode : 1, + sk_txtime_report_errors : 1, + sk_txtime_unused : 6; + + struct socket *sk_socket; + void *sk_user_data; + + void *sk_security; + + struct sock_cgroup_data sk_cgrp_data; + struct mem_cgroup *sk_memcg; + void (*sk_state_change)(struct sock *sk); + void (*sk_data_ready)(struct sock *sk); + void (*sk_write_space)(struct sock *sk); + void (*sk_error_report)(struct sock *sk); + int (*sk_backlog_rcv)(struct sock *sk, + struct sk_buff *skb); + + struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk, + struct net_device *dev, + struct sk_buff *skb); + + void (*sk_destruct)(struct sock *sk); + struct sock_reuseport *sk_reuseport_cb; + + struct bpf_sk_storage *sk_bpf_storage; + + struct callback_head sk_rcu; +}; + +enum sk_pacing { + SK_PACING_NONE = 0, + SK_PACING_NEEDED = 1, + SK_PACING_FQ = 2, +}; +# 542 "./include/net/sock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_user_data_is_nocopy(const struct sock *sk) +{ + return ((uintptr_t)sk->sk_user_data & 1UL); +} +# 579 "./include/net/sock.h" +int sk_set_peek_off(struct sock *sk, int val); + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_peek_offset(struct sock *sk, int flags) +{ + if (__builtin_expect(!!(flags & 2), 0)) { + return ({ do { extern void __compiletime_assert_1470(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_peek_off) == sizeof(char) || sizeof(sk->sk_peek_off) == sizeof(short) || sizeof(sk->sk_peek_off) == sizeof(int) || sizeof(sk->sk_peek_off) == sizeof(long)) || sizeof(sk->sk_peek_off) == sizeof(long long))) __compiletime_assert_1470(); } while (0); ({ typeof( _Generic((sk->sk_peek_off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_peek_off))) __x = (*(const volatile typeof( _Generic((sk->sk_peek_off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_peek_off))) *)&(sk->sk_peek_off)); do { } while (0); (typeof(sk->sk_peek_off))__x; }); }); + } + + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_peek_offset_bwd(struct sock *sk, int val) +{ + s32 off = ({ do { extern void __compiletime_assert_1471(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_peek_off) == sizeof(char) || sizeof(sk->sk_peek_off) == sizeof(short) || sizeof(sk->sk_peek_off) == sizeof(int) || sizeof(sk->sk_peek_off) == sizeof(long)) || sizeof(sk->sk_peek_off) == sizeof(long long))) __compiletime_assert_1471(); } while (0); ({ typeof( _Generic((sk->sk_peek_off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_peek_off))) __x = (*(const volatile typeof( _Generic((sk->sk_peek_off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_peek_off))) *)&(sk->sk_peek_off)); do { } while (0); (typeof(sk->sk_peek_off))__x; }); }); + + if (__builtin_expect(!!(off >= 0), 0)) { + off = __builtin_choose_expr(((!!(sizeof((typeof((s32)(off - val)) *)1 == (typeof((s32)(0)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((s32)(off - val)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((s32)(0)) * 0l)) : (int *)8))))), (((s32)(off - val)) > ((s32)(0)) ? 
((s32)(off - val)) : ((s32)(0))), ({ typeof((s32)(off - val)) __UNIQUE_ID___x1472 = ((s32)(off - val)); typeof((s32)(0)) __UNIQUE_ID___y1473 = ((s32)(0)); ((__UNIQUE_ID___x1472) > (__UNIQUE_ID___y1473) ? (__UNIQUE_ID___x1472) : (__UNIQUE_ID___y1473)); })); + do { do { extern void __compiletime_assert_1474(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_peek_off) == sizeof(char) || sizeof(sk->sk_peek_off) == sizeof(short) || sizeof(sk->sk_peek_off) == sizeof(int) || sizeof(sk->sk_peek_off) == sizeof(long)) || sizeof(sk->sk_peek_off) == sizeof(long long))) __compiletime_assert_1474(); } while (0); do { *(volatile typeof(sk->sk_peek_off) *)&(sk->sk_peek_off) = (off); } while (0); } while (0); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_peek_offset_fwd(struct sock *sk, int val) +{ + sk_peek_offset_bwd(sk, -val); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock *sk_entry(const struct hlist_node *node) +{ + return ({ void *__mptr = (void *)(node); do { extern void __compiletime_assert_1475(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(node)), typeof(((struct sock *)0)->__sk_common.skc_node)) && !__builtin_types_compatible_p(typeof(*(node)), typeof(void))))) __compiletime_assert_1475(); } while (0); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_node))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock *__sk_head(const struct hlist_head *head) +{ + return ({ void *__mptr = (void *)(head->first); do { extern void __compiletime_assert_1476(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(head->first)), typeof(((struct sock *)0)->__sk_common.skc_node)) && !__builtin_types_compatible_p(typeof(*(head->first)), typeof(void))))) __compiletime_assert_1476(); } while (0); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_node))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock *sk_head(const struct hlist_head *head) +{ + return hlist_empty(head) ? ((void *)0) : __sk_head(head); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock *__sk_nulls_head(const struct hlist_nulls_head *head) +{ + return ({ void *__mptr = (void *)(head->first); do { extern void __compiletime_assert_1477(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(head->first)), typeof(((struct sock *)0)->__sk_common.skc_nulls_node)) && !__builtin_types_compatible_p(typeof(*(head->first)), typeof(void))))) __compiletime_assert_1477(); } while (0); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_nulls_node))); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock *sk_nulls_head(const struct hlist_nulls_head *head) +{ + return hlist_nulls_empty(head) ? 
((void *)0) : __sk_nulls_head(head); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock *sk_next(const struct sock *sk) +{ + return ({ typeof(sk->__sk_common.skc_node.next) ____ptr = (sk->__sk_common.skc_node.next); ____ptr ? ({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1478(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((struct sock *)0)->__sk_common.skc_node)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1478(); } while (0); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_node))); }) : ((void *)0); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock *sk_nulls_next(const struct sock *sk) +{ + return (!is_a_nulls(sk->__sk_common.skc_nulls_node.next)) ? + ({ void *__mptr = (void *)(sk->__sk_common.skc_nulls_node.next); do { extern void __compiletime_assert_1479(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(sk->__sk_common.skc_nulls_node.next)), typeof(((struct sock *)0)->__sk_common.skc_nulls_node)) && !__builtin_types_compatible_p(typeof(*(sk->__sk_common.skc_nulls_node.next)), typeof(void))))) __compiletime_assert_1479(); } while (0); ((struct sock *)(__mptr - __builtin_offsetof(struct sock, __sk_common.skc_nulls_node))); }) + : + ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_unhashed(const struct sock *sk) +{ + return hlist_unhashed(&sk->__sk_common.skc_node); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_hashed(const struct sock *sk) +{ + return !sk_unhashed(sk); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_node_init(struct hlist_node *node) +{ + node->pprev = ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_nulls_node_init(struct hlist_nulls_node *node) +{ + node->pprev = ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __sk_del_node(struct sock *sk) +{ + __hlist_del(&sk->__sk_common.skc_node); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __sk_del_node_init(struct sock *sk) +{ + if (sk_hashed(sk)) { + __sk_del_node(sk); + sk_node_init(&sk->__sk_common.skc_node); + return true; + } + return false; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void sock_hold(struct sock *sk) +{ + refcount_inc(&sk->__sk_common.skc_refcnt); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__always_inline__)) void __sock_put(struct sock *sk) +{ + refcount_dec(&sk->__sk_common.skc_refcnt); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_del_node_init(struct sock *sk) +{ + 
bool rc = __sk_del_node_init(sk); + + if (rc) { + + ({ int __ret_warn_on = !!(refcount_read(&sk->__sk_common.skc_refcnt) == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1480)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/sock.h"), "i" (707), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1481)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1482)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + __sock_put(sk); + } + return rc; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __sk_nulls_del_node_init_rcu(struct sock *sk) +{ + if (sk_hashed(sk)) { + hlist_nulls_del_init_rcu(&sk->__sk_common.skc_nulls_node); + return true; + } + return false; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_nulls_del_node_init_rcu(struct sock *sk) +{ + bool rc = __sk_nulls_del_node_init_rcu(sk); + + if (rc) { + + ({ int __ret_warn_on = !!(refcount_read(&sk->__sk_common.skc_refcnt) == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1483)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/net/sock.h"), "i" (729), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1484)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1485)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + __sock_put(sk); + } + return rc; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __sk_add_node(struct sock *sk, struct hlist_head *list) +{ + hlist_add_head(&sk->__sk_common.skc_node, list); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_add_node(struct sock *sk, struct hlist_head *list) +{ + sock_hold(sk); + __sk_add_node(sk, list); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) +{ + sock_hold(sk); + if (1 && sk->__sk_common.skc_reuseport && + sk->__sk_common.skc_family == 10) + hlist_add_tail_rcu(&sk->__sk_common.skc_node, list); + else + hlist_add_head_rcu(&sk->__sk_common.skc_node, list); +} + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list) +{ + sock_hold(sk); + hlist_add_tail_rcu(&sk->__sk_common.skc_node, list); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) +{ + hlist_nulls_add_head_rcu(&sk->__sk_common.skc_nulls_node, list); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list) +{ + hlist_nulls_add_tail_rcu(&sk->__sk_common.skc_nulls_node, list); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) +{ + sock_hold(sk); + __sk_nulls_add_node_rcu(sk, list); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __sk_del_bind_node(struct sock *sk) +{ + __hlist_del(&sk->__sk_common.skc_bind_node); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_add_bind_node(struct sock *sk, + struct hlist_head *list) +{ + hlist_add_head(&sk->__sk_common.skc_bind_node, list); +} +# 821 "./include/net/sock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct user_namespace *sk_user_ns(struct sock *sk) +{ + + + + + return sk->sk_socket->file->f_cred->user_ns; +} + + +enum sock_flags { + SOCK_DEAD, + SOCK_DONE, + SOCK_URGINLINE, + SOCK_KEEPOPEN, + SOCK_LINGER, + SOCK_DESTROY, + SOCK_BROADCAST, + SOCK_TIMESTAMP, + SOCK_ZAPPED, + SOCK_USE_WRITE_QUEUE, + SOCK_DBG, + SOCK_RCVTSTAMP, + SOCK_RCVTSTAMPNS, + SOCK_LOCALROUTE, + SOCK_QUEUE_SHRUNK, + SOCK_MEMALLOC, + SOCK_TIMESTAMPING_RX_SOFTWARE, + SOCK_FASYNC, + SOCK_RXQ_OVFL, + SOCK_ZEROCOPY, + SOCK_WIFI_STATUS, + SOCK_NOFCS, + + + + SOCK_FILTER_LOCKED, + SOCK_SELECT_ERR_QUEUE, + SOCK_RCU_FREE, + SOCK_TXTIME, + SOCK_XDP, + SOCK_TSTAMP_NEW, +}; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_copy_flags(struct sock *nsk, struct sock *osk) +{ + nsk->__sk_common.skc_flags = osk->__sk_common.skc_flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_set_flag(struct sock *sk, enum sock_flags flag) +{ + __set_bit(flag, &sk->__sk_common.skc_flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_reset_flag(struct sock *sk, enum sock_flags flag) +{ + __clear_bit(flag, &sk->__sk_common.skc_flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sock_flag(const struct sock *sk, enum sock_flags flag) +{ + return test_bit(flag, &sk->__sk_common.skc_flags); +} + + +extern struct static_key_false memalloc_socks_key; +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_memalloc_socks(void) +{ + return ({ bool branch; if (__builtin_types_compatible_p(typeof(*&memalloc_socks_key), struct static_key_true)) branch = 
arch_static_branch_jump(&(&memalloc_socks_key)->key, false); else if (__builtin_types_compatible_p(typeof(*&memalloc_socks_key), struct static_key_false)) branch = arch_static_branch(&(&memalloc_socks_key)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }); +} +# 902 "./include/net/sock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) +{ + return gfp_mask | (sk->sk_allocation & (( gfp_t)0x20000u)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_acceptq_removed(struct sock *sk) +{ + do { do { extern void __compiletime_assert_1486(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_ack_backlog) == sizeof(char) || sizeof(sk->sk_ack_backlog) == sizeof(short) || sizeof(sk->sk_ack_backlog) == sizeof(int) || sizeof(sk->sk_ack_backlog) == sizeof(long)) || sizeof(sk->sk_ack_backlog) == sizeof(long long))) __compiletime_assert_1486(); } while (0); do { *(volatile typeof(sk->sk_ack_backlog) *)&(sk->sk_ack_backlog) = (sk->sk_ack_backlog - 1); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_acceptq_added(struct sock *sk) +{ + do { do { extern void __compiletime_assert_1487(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_ack_backlog) == sizeof(char) || sizeof(sk->sk_ack_backlog) == sizeof(short) || sizeof(sk->sk_ack_backlog) == sizeof(int) || sizeof(sk->sk_ack_backlog) == sizeof(long)) || sizeof(sk->sk_ack_backlog) == sizeof(long long))) __compiletime_assert_1487(); } while (0); do { *(volatile typeof(sk->sk_ack_backlog) *)&(sk->sk_ack_backlog) = (sk->sk_ack_backlog + 1); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_acceptq_is_full(const struct sock *sk) +{ + return ({ do { extern void __compiletime_assert_1488(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_ack_backlog) == sizeof(char) || sizeof(sk->sk_ack_backlog) == sizeof(short) || sizeof(sk->sk_ack_backlog) == sizeof(int) || sizeof(sk->sk_ack_backlog) == sizeof(long)) || sizeof(sk->sk_ack_backlog) == sizeof(long long))) __compiletime_assert_1488(); } while (0); ({ typeof( _Generic((sk->sk_ack_backlog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_ack_backlog))) __x = (*(const volatile typeof( _Generic((sk->sk_ack_backlog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_ack_backlog))) *)&(sk->sk_ack_backlog)); do { } while (0); (typeof(sk->sk_ack_backlog))__x; }); }) > ({ do { extern void 
__compiletime_assert_1489(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_max_ack_backlog) == sizeof(char) || sizeof(sk->sk_max_ack_backlog) == sizeof(short) || sizeof(sk->sk_max_ack_backlog) == sizeof(int) || sizeof(sk->sk_max_ack_backlog) == sizeof(long)) || sizeof(sk->sk_max_ack_backlog) == sizeof(long long))) __compiletime_assert_1489(); } while (0); ({ typeof( _Generic((sk->sk_max_ack_backlog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_max_ack_backlog))) __x = (*(const volatile typeof( _Generic((sk->sk_max_ack_backlog), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_max_ack_backlog))) *)&(sk->sk_max_ack_backlog)); do { } while (0); (typeof(sk->sk_max_ack_backlog))__x; }); }); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_stream_min_wspace(const struct sock *sk) +{ + return ({ do { extern void __compiletime_assert_1490(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_wmem_queued) == sizeof(char) || sizeof(sk->sk_wmem_queued) == sizeof(short) || sizeof(sk->sk_wmem_queued) == sizeof(int) || sizeof(sk->sk_wmem_queued) == sizeof(long)) || sizeof(sk->sk_wmem_queued) == sizeof(long long))) __compiletime_assert_1490(); } while (0); ({ typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) __x = (*(const volatile typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) *)&(sk->sk_wmem_queued)); do { } while (0); (typeof(sk->sk_wmem_queued))__x; }); }) >> 1; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_stream_wspace(const struct sock *sk) +{ + return ({ do { extern void __compiletime_assert_1491(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_sndbuf) == sizeof(char) || sizeof(sk->sk_sndbuf) == sizeof(short) || sizeof(sk->sk_sndbuf) == sizeof(int) || sizeof(sk->sk_sndbuf) == sizeof(long)) || sizeof(sk->sk_sndbuf) == sizeof(long long))) __compiletime_assert_1491(); } while (0); ({ typeof( _Generic((sk->sk_sndbuf), char: (char)0, 
unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_sndbuf))) __x = (*(const volatile typeof( _Generic((sk->sk_sndbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_sndbuf))) *)&(sk->sk_sndbuf)); do { } while (0); (typeof(sk->sk_sndbuf))__x; }); }) - ({ do { extern void __compiletime_assert_1492(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_wmem_queued) == sizeof(char) || sizeof(sk->sk_wmem_queued) == sizeof(short) || sizeof(sk->sk_wmem_queued) == sizeof(int) || sizeof(sk->sk_wmem_queued) == sizeof(long)) || sizeof(sk->sk_wmem_queued) == sizeof(long long))) __compiletime_assert_1492(); } while (0); ({ typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) __x = (*(const volatile typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) *)&(sk->sk_wmem_queued)); do { } while (0); (typeof(sk->sk_wmem_queued))__x; }); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_wmem_queued_add(struct sock *sk, int val) +{ + do { do { extern void __compiletime_assert_1493(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_wmem_queued) == sizeof(char) || sizeof(sk->sk_wmem_queued) == sizeof(short) || sizeof(sk->sk_wmem_queued) == sizeof(int) || sizeof(sk->sk_wmem_queued) == sizeof(long)) || sizeof(sk->sk_wmem_queued) == sizeof(long long))) __compiletime_assert_1493(); } while (0); do { *(volatile typeof(sk->sk_wmem_queued) *)&(sk->sk_wmem_queued) = (sk->sk_wmem_queued + val); } while (0); } while (0); +} + +void sk_stream_write_space(struct sock *sk); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) +{ + + skb_dst_force(skb); + + if (!sk->sk_backlog.tail) + do { do { extern void __compiletime_assert_1494(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_backlog.head) == sizeof(char) || sizeof(sk->sk_backlog.head) == sizeof(short) || sizeof(sk->sk_backlog.head) == sizeof(int) || sizeof(sk->sk_backlog.head) == sizeof(long)) 
|| sizeof(sk->sk_backlog.head) == sizeof(long long))) __compiletime_assert_1494(); } while (0); do { *(volatile typeof(sk->sk_backlog.head) *)&(sk->sk_backlog.head) = (skb); } while (0); } while (0); + else + sk->sk_backlog.tail->next = skb; + + do { do { extern void __compiletime_assert_1495(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_backlog.tail) == sizeof(char) || sizeof(sk->sk_backlog.tail) == sizeof(short) || sizeof(sk->sk_backlog.tail) == sizeof(int) || sizeof(sk->sk_backlog.tail) == sizeof(long)) || sizeof(sk->sk_backlog.tail) == sizeof(long long))) __compiletime_assert_1495(); } while (0); do { *(volatile typeof(sk->sk_backlog.tail) *)&(sk->sk_backlog.tail) = (skb); } while (0); } while (0); + skb->next = ((void *)0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) +{ + unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_backlog.rmem_alloc); + + return qsize > limit; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((__warn_unused_result__)) int sk_add_backlog(struct sock *sk, struct sk_buff *skb, + unsigned int limit) +{ + if (sk_rcvqueues_full(sk, limit)) + return -105; + + + + + + + if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) + return -12; + + __sk_add_backlog(sk, skb); + sk->sk_backlog.len += skb->truesize; + return 0; +} + +int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) +{ + if (sk_memalloc_socks() && skb_pfmemalloc(skb)) + return __sk_backlog_rcv(sk, skb); + + return sk->sk_backlog_rcv(sk, skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_incoming_cpu_update(struct sock *sk) +{ + int cpu = ({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" 
(pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; }); + + if (__builtin_expect(!!(({ do { extern void __compiletime_assert_1496(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(char) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(short) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(int) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(long)) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(long long))) __compiletime_assert_1496(); } while (0); ({ typeof( _Generic((sk->__sk_common.skc_incoming_cpu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->__sk_common.skc_incoming_cpu))) __x = (*(const volatile typeof( _Generic((sk->__sk_common.skc_incoming_cpu), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->__sk_common.skc_incoming_cpu))) *)&(sk->__sk_common.skc_incoming_cpu)); do { } while (0); (typeof(sk->__sk_common.skc_incoming_cpu))__x; }); }) != cpu), 0)) + do { do { extern void __compiletime_assert_1497(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(char) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(short) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(int) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(long)) || sizeof(sk->__sk_common.skc_incoming_cpu) == sizeof(long long))) __compiletime_assert_1497(); } while (0); do { *(volatile typeof(sk->__sk_common.skc_incoming_cpu) *)&(sk->__sk_common.skc_incoming_cpu) = (cpu); } while (0); } while (0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_rps_record_flow_hash(__u32 hash) +{ + + struct rps_sock_flow_table *sock_flow_table; + + rcu_read_lock(); + sock_flow_table = ({ typeof(*(rps_sock_flow_table)) *________p1 = (typeof(*(rps_sock_flow_table)) *)({ do { extern void 
__compiletime_assert_1498(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((rps_sock_flow_table)) == sizeof(char) || sizeof((rps_sock_flow_table)) == sizeof(short) || sizeof((rps_sock_flow_table)) == sizeof(int) || sizeof((rps_sock_flow_table)) == sizeof(long)) || sizeof((rps_sock_flow_table)) == sizeof(long long))) __compiletime_assert_1498(); } while (0); ({ typeof( _Generic(((rps_sock_flow_table)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rps_sock_flow_table)))) __x = (*(const volatile typeof( _Generic(((rps_sock_flow_table)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((rps_sock_flow_table)))) *)&((rps_sock_flow_table))); do { } while (0); (typeof((rps_sock_flow_table)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_held()))) { __warned = true; lockdep_rcu_suspicious("include/net/sock.h", 1013, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(rps_sock_flow_table)) *)(________p1)); }); + rps_record_sock_flow(sock_flow_table, hash); + rcu_read_unlock(); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_rps_record_flow(const struct sock *sk) +{ + + if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&rfs_needed), struct static_key_true)) branch = arch_static_branch_jump(&(&rfs_needed)->key, false); else if (__builtin_types_compatible_p(typeof(*&rfs_needed), struct static_key_false)) branch = arch_static_branch(&(&rfs_needed)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); })) { +# 1033 "./include/net/sock.h" + if (sk->__sk_common.skc_state == TCP_ESTABLISHED) + sock_rps_record_flow_hash(sk->__sk_common.skc_rxhash); + } + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_rps_save_rxhash(struct sock *sk, + const struct sk_buff *skb) +{ + + if (__builtin_expect(!!(sk->__sk_common.skc_rxhash != skb->hash), 0)) + sk->__sk_common.skc_rxhash = skb->hash; + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_rps_reset_rxhash(struct sock *sk) +{ + + sk->__sk_common.skc_rxhash = 0; + +} +# 1070 "./include/net/sock.h" +int sk_stream_wait_connect(struct sock *sk, long *timeo_p); +int sk_stream_wait_memory(struct sock *sk, long *timeo_p); +void sk_stream_wait_close(struct sock *sk, long timeo_p); +int sk_stream_error(struct sock *sk, int flags, int err); +void sk_stream_kill_queues(struct sock *sk); +void sk_set_memalloc(struct sock *sk); +void sk_clear_memalloc(struct sock *sk); + +void __sk_flush_backlog(struct sock *sk); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) bool sk_flush_backlog(struct sock *sk) +{ + if (__builtin_expect(!!(({ do { extern void __compiletime_assert_1499(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_backlog.tail) == sizeof(char) || sizeof(sk->sk_backlog.tail) == sizeof(short) || sizeof(sk->sk_backlog.tail) == sizeof(int) || sizeof(sk->sk_backlog.tail) == sizeof(long)) || sizeof(sk->sk_backlog.tail) == sizeof(long long))) __compiletime_assert_1499(); } while (0); ({ typeof( _Generic((sk->sk_backlog.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_backlog.tail))) __x = (*(const volatile typeof( _Generic((sk->sk_backlog.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_backlog.tail))) *)&(sk->sk_backlog.tail)); do { } while (0); (typeof(sk->sk_backlog.tail))__x; }); })), 0)) { + __sk_flush_backlog(sk); + return true; + } + return false; +} + +int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb); + +struct request_sock_ops; +struct timewait_sock_ops; +struct inet_hashinfo; +struct raw_hashinfo; +struct smc_hashinfo; +struct module; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_prot_clear_nulls(struct sock *sk, int size) +{ + if (__builtin_offsetof(struct sock, __sk_common.skc_node.next) != 0) + memset(sk, 0, __builtin_offsetof(struct sock, __sk_common.skc_node.next)); + memset(&sk->__sk_common.skc_node.pprev, 0, + size - __builtin_offsetof(struct sock, __sk_common.skc_node.pprev)); +} + + + + +struct proto { + void (*close)(struct sock *sk, + long timeout); + int (*pre_connect)(struct sock *sk, + struct sockaddr *uaddr, + int addr_len); + int (*connect)(struct sock *sk, + struct sockaddr *uaddr, + int addr_len); + int (*disconnect)(struct sock *sk, int flags); + + struct sock * (*accept)(struct sock *sk, int flags, int *err, + bool kern); + + int (*ioctl)(struct sock *sk, int cmd, + unsigned long arg); + int (*init)(struct sock *sk); + void (*destroy)(struct sock *sk); + void (*shutdown)(struct sock *sk, int how); + int (*setsockopt)(struct sock *sk, int level, + int optname, char *optval, + unsigned int optlen); + int (*getsockopt)(struct sock *sk, int level, + int optname, char *optval, + int *option); + void (*keepalive)(struct sock *sk, int valbool); + + int (*compat_setsockopt)(struct sock *sk, + int level, + int optname, char *optval, + unsigned int optlen); + int (*compat_getsockopt)(struct sock *sk, + int level, + int optname, char *optval, + int *option); + int (*compat_ioctl)(struct sock *sk, + unsigned int cmd, unsigned long arg); + + int (*sendmsg)(struct sock *sk, struct msghdr *msg, + size_t len); + int (*recvmsg)(struct sock *sk, struct msghdr *msg, + size_t len, int noblock, int flags, + int *addr_len); + int (*sendpage)(struct sock *sk, struct page *page, + int offset, size_t size, 
int flags); + int (*bind)(struct sock *sk, + struct sockaddr *addr, int addr_len); + int (*bind_add)(struct sock *sk, + struct sockaddr *addr, int addr_len); + + int (*backlog_rcv) (struct sock *sk, + struct sk_buff *skb); + + void (*release_cb)(struct sock *sk); + + + int (*hash)(struct sock *sk); + void (*unhash)(struct sock *sk); + void (*rehash)(struct sock *sk); + int (*get_port)(struct sock *sk, unsigned short snum); + + + + unsigned int inuse_idx; + + + bool (*stream_memory_free)(const struct sock *sk, int wake); + bool (*stream_memory_read)(const struct sock *sk); + + void (*enter_memory_pressure)(struct sock *sk); + void (*leave_memory_pressure)(struct sock *sk); + atomic_long_t *memory_allocated; + struct percpu_counter *sockets_allocated; + + + + + + + unsigned long *memory_pressure; + long *sysctl_mem; + + int *sysctl_wmem; + int *sysctl_rmem; + u32 sysctl_wmem_offset; + u32 sysctl_rmem_offset; + + int max_header; + bool no_autobind; + + struct kmem_cache *slab; + unsigned int obj_size; + slab_flags_t slab_flags; + unsigned int useroffset; + unsigned int usersize; + + struct percpu_counter *orphan_count; + + struct request_sock_ops *rsk_prot; + struct timewait_sock_ops *twsk_prot; + + union { + struct inet_hashinfo *hashinfo; + struct udp_table *udp_table; + struct raw_hashinfo *raw_hash; + struct smc_hashinfo *smc_hash; + } h; + + struct module *owner; + + char name[32]; + + struct list_head node; + + + + int (*diag_destroy)(struct sock *sk, int err); +} __attribute__((__designated_init__)); + +int proto_register(struct proto *prot, int alloc_slab); +void proto_unregister(struct proto *prot); +int sock_load_diag_module(int family, int protocol); +# 1261 "./include/net/sock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __sk_stream_memory_free(const struct sock *sk, int wake) +{ + if (({ do { extern void __compiletime_assert_1500(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_wmem_queued) == sizeof(char) || sizeof(sk->sk_wmem_queued) == sizeof(short) || sizeof(sk->sk_wmem_queued) == sizeof(int) || sizeof(sk->sk_wmem_queued) == sizeof(long)) || sizeof(sk->sk_wmem_queued) == sizeof(long long))) __compiletime_assert_1500(); } while (0); ({ typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) __x = (*(const volatile typeof( _Generic((sk->sk_wmem_queued), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_wmem_queued))) *)&(sk->sk_wmem_queued)); do { } while (0); (typeof(sk->sk_wmem_queued))__x; }); }) >= ({ do { extern void __compiletime_assert_1501(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_sndbuf) == sizeof(char) || sizeof(sk->sk_sndbuf) == sizeof(short) || sizeof(sk->sk_sndbuf) == sizeof(int) || sizeof(sk->sk_sndbuf) == 
sizeof(long)) || sizeof(sk->sk_sndbuf) == sizeof(long long))) __compiletime_assert_1501(); } while (0); ({ typeof( _Generic((sk->sk_sndbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_sndbuf))) __x = (*(const volatile typeof( _Generic((sk->sk_sndbuf), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_sndbuf))) *)&(sk->sk_sndbuf)); do { } while (0); (typeof(sk->sk_sndbuf))__x; }); })) + return false; + + return sk->__sk_common.skc_prot->stream_memory_free ? + sk->__sk_common.skc_prot->stream_memory_free(sk, wake) : true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_stream_memory_free(const struct sock *sk) +{ + return __sk_stream_memory_free(sk, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __sk_stream_is_writeable(const struct sock *sk, int wake) +{ + return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && + __sk_stream_memory_free(sk, wake); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_stream_is_writeable(const struct sock *sk) +{ + return __sk_stream_is_writeable(sk, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_under_cgroup_hierarchy(struct sock *sk, + struct cgroup *ancestor) +{ + + return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), + ancestor); + + + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_has_memory_pressure(const struct sock *sk) +{ + return sk->__sk_common.skc_prot->memory_pressure != ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_under_memory_pressure(const struct sock *sk) +{ + if (!sk->__sk_common.skc_prot->memory_pressure) + return false; + + if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&memcg_sockets_enabled_key), struct static_key_true)) branch = arch_static_branch_jump(&(&memcg_sockets_enabled_key)->key, false); else if (__builtin_types_compatible_p(typeof(*&memcg_sockets_enabled_key), struct static_key_false)) branch = arch_static_branch(&(&memcg_sockets_enabled_key)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }) && sk->sk_memcg && + mem_cgroup_under_socket_pressure(sk->sk_memcg)) + return true; + + return !!*sk->__sk_common.skc_prot->memory_pressure; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long +sk_memory_allocated(const struct sock *sk) +{ + return atomic_long_read(sk->__sk_common.skc_prot->memory_allocated); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt)
+{
+	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt)
+{
+	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
+}
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+	percpu_counter_dec(sk->sk_prot->sockets_allocated);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+	percpu_counter_inc(sk->sk_prot->sockets_allocated);
+}
+
+static inline u64
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+	return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline long
+proto_memory_allocated(struct proto *prot)
+{
+	return atomic_long_read(prot->memory_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+	if (!prot->memory_pressure)
+		return false;
+	return !!*prot->memory_pressure;
+}
+
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+int sock_prot_inuse_get(struct net *net, struct proto *proto);
+int sock_inuse_get(struct net *net);
+# 1385 "./include/net/sock.h"
+static inline int __sk_prot_rehash(struct sock *sk)
+{
+	sk->sk_prot->unhash(sk);
+	return sk->sk_prot->hash(sk);
+}
+# 1406 "./include/net/sock.h"
+struct socket_alloc {
+	struct socket socket;
+	struct inode vfs_inode;
+};
+
+static inline struct socket *SOCKET_I(struct inode *inode)
+{
+	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
+}
+
+static inline struct inode *SOCK_INODE(struct socket *socket)
+{
+	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
+}
+
+int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
+int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reduce_allocated(struct sock *sk, int amount);
+void __sk_mem_reclaim(struct sock *sk, int amount);
+# 1438 "./include/net/sock.h"
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+	long val = sk->sk_prot->sysctl_mem[index];
+
+	return val;
+}
+
+static inline int sk_mem_pages(int amt)
+{
+	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
+}
+
+static inline bool sk_has_account(struct sock *sk)
+{
+	return !!sk->sk_prot->memory_allocated;
+}
+
+static inline bool sk_wmem_schedule(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return true;
+	return size <= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, size, SK_MEM_SEND);
+}
+
+static inline bool
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+{
+	if (!sk_has_account(sk))
+		return true;
+	return size <= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
+		skb_pfmemalloc(skb);
+}
+
+static inline void sk_mem_reclaim(struct sock *sk)
+{
+	if (!sk_has_account(sk))
+		return;
+	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
+}
+
+static inline void sk_mem_reclaim_partial(struct sock *sk)
+{
+	if (!sk_has_account(sk))
+		return;
+	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+}
+
+static inline void sk_mem_charge(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return;
+	sk->sk_forward_alloc -= size;
+}
+
+static inline void sk_mem_uncharge(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return;
+	sk->sk_forward_alloc += size;
+# 1515 "./include/net/sock.h"
+	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+		__sk_mem_reclaim(sk, 1 << 20);
+}
+
+DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
+static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+	sk_wmem_queued_add(sk, -skb->truesize);
+	sk_mem_uncharge(sk, skb->truesize);
+	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
+	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
+		skb_ext_reset(skb);
+		skb_zcopy_clear(skb, true);
+		sk->sk_tx_skb_cache = skb;
+		return;
+	}
+	__kfree_skb(skb);
+}
+
+static inline void sock_release_ownership(struct sock *sk)
+{
+	if (sk->sk_lock.owned) {
+		sk->sk_lock.owned = 0;
+
+		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
+	}
+}
+# 1565 "./include/net/sock.h"
+static inline bool lockdep_sock_is_held(const struct sock *sk)
+{
+	return lockdep_is_held(&sk->sk_lock) ||
+	       lockdep_is_held(&sk->sk_lock.slock);
+}
+
+void lock_sock_nested(struct sock *sk, int subclass);
+
+static inline void lock_sock(struct sock *sk)
+{
+	lock_sock_nested(sk, 0);
+}
+
+void __release_sock(struct sock *sk);
+void release_sock(struct sock *sk);
+# 1589 "./include/net/sock.h"
+bool lock_sock_fast(struct sock *sk);
+# 1598 "./include/net/sock.h"
+static inline void unlock_sock_fast(struct sock *sk, bool slow)
+{
+	if (slow)
+		release_sock(sk);
+	else
+		spin_unlock_bh(&sk->sk_lock.slock);
+}
+# 1620 "./include/net/sock.h"
+static inline void sock_owned_by_me(const struct sock *sk)
+{
+	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
+}
+
+static inline bool sock_owned_by_user(const struct sock *sk)
+{
+	sock_owned_by_me(sk);
+	return sk->sk_lock.owned;
+}
+
+static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
+{
+	return sk->sk_lock.owned;
+}
+
+static inline bool sock_allow_reclassification(const struct sock *csk)
+{
+	struct sock *sk = (struct sock *)csk;
+
+	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+}
+
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+		      struct proto *prot, int kern);
+void sk_free(struct sock *sk);
+void sk_destruct(struct sock *sk);
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+void sk_free_unlock_clone(struct sock *sk);
+
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+			     gfp_t priority);
+void __sock_wfree(struct sk_buff *skb);
+void sock_wfree(struct sk_buff *skb);
+struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
+			     gfp_t priority);
+void skb_orphan_partial(struct sk_buff *skb);
+void sock_rfree(struct sk_buff *skb);
+void sock_efree(struct sk_buff *skb);
+
+void sock_edemux(struct sk_buff *skb);
+void sock_pfree(struct sk_buff *skb);
+
+int sock_setsockopt(struct socket *sock, int level, int op,
+		    char __user *optval, unsigned int optlen);
+
+int sock_getsockopt(struct socket *sock, int level, int op,
+		    char __user *optval, int __user *optlen);
+int sock_gettstamp(struct socket *sock, void __user *userstamp,
+		   bool timeval, bool time32);
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+				    int noblock, int *errcode);
+struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+				     unsigned long data_len, int noblock,
+				     int *errcode, int max_page_order);
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sock_kzfree_s(struct sock *sk, void *mem, int size);
+void sk_send_sigurg(struct sock *sk);
+
+struct sockcm_cookie {
+	u64 transmit_time;
+	u32 mark;
+	u16 tsflags;
+};
+
+static inline void sockcm_init(struct sockcm_cookie *sockc,
+			       const struct sock *sk)
+{
+	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+}
+
+int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+		     struct sockcm_cookie *sockc);
+int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
+		   struct sockcm_cookie *sockc);
+
+int sock_no_bind(struct socket *, struct sockaddr *, int);
+int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_socketpair(struct socket *, struct socket *);
+int sock_no_accept(struct socket *, struct socket *, int, bool);
+int sock_no_getname(struct socket *, struct sockaddr *, int);
+int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
+int sock_no_listen(struct socket *, int);
+int sock_no_shutdown(struct socket *, int);
+int sock_no_getsockopt(struct socket *, int, int, char __user *, int __user *);
+int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
+int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
+int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
+int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
+int sock_no_mmap(struct file *file, struct socket *sock,
+		 struct vm_area_struct *vma);
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
+			 size_t size, int flags);
+ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
+				int offset, size_t size, int flags);
+
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
+			   char __user *optval, int __user *optlen);
+int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+			int flags);
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
+			   char __user *optval, unsigned int optlen);
+int compat_sock_common_getsockopt(struct socket *sock, int level,
+				  int optname, char __user *optval, int __user *optlen);
+int compat_sock_common_setsockopt(struct socket *sock, int level,
+				  int optname, char __user *optval, unsigned int optlen);
+
+void sk_common_release(struct sock *sk);
+
+void sock_init_data(struct socket *sock, struct sock *sk);
+# 1777 "./include/net/sock.h"
+static inline void sock_put(struct sock *sk)
+{
+	if (refcount_dec_and_test(&sk->sk_refcnt))
+		sk_free(sk);
+}
+
+void sock_gen_put(struct sock *sk);
+
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+		     unsigned int trim_cap, bool refcounted);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+				 const int nested)
+{
+	return __sk_receive_skb(sk, skb, nested, 1, true);
+}
+
+static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+{
+	if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+		return;
+	sk->sk_tx_queue_mapping = tx_queue;
+}
+
+static inline void sk_tx_queue_clear(struct sock *sk)
+{
+	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
+}
+
+static inline int sk_tx_queue_get(const struct sock *sk)
+{
+	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+		return sk->sk_tx_queue_mapping;
+
+	return -1;
+}
+
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+	if (skb_rx_queue_recorded(skb)) {
+		u16 rx_queue = skb_get_rx_queue(skb);
+
+		if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
+			return;
+
+		sk->sk_rx_queue_mapping = rx_queue;
+	}
+}
+
+static inline void sk_rx_queue_clear(struct sock *sk)
+{
+	sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+}
+static inline int sk_rx_queue_get(const struct sock *sk)
+{
+	if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
+		return sk->sk_rx_queue_mapping;
+
+	return -1;
+}
+
+static inline void sk_set_socket(struct sock *sk, struct socket *sock)
+{
+	sk_tx_queue_clear(sk);
+	sk->sk_socket = sock;
+}
+
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+	return &rcu_dereference_raw(sk->sk_wq)->wait;
+}
+
+static inline void sock_orphan(struct sock *sk)
+{
+	write_lock_bh(&sk->sk_callback_lock);
+	sock_set_flag(sk, SOCK_DEAD);
+	sk_set_socket(sk, NULL);
+	sk->sk_wq = NULL;
+	write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static inline void sock_graft(struct sock *sk, struct socket *parent)
+{
+	WARN_ON(parent->sk);
+	write_lock_bh(&sk->sk_callback_lock);
+	rcu_assign_pointer(sk->sk_wq, &parent->wq);
+	parent->sk = sk;
+	sk_set_socket(sk, parent);
+	sk->sk_uid = SOCK_INODE(parent)->i_uid;
+	security_sock_graft(sk, parent);
+	write_unlock_bh(&sk->sk_callback_lock);
+}
+
+kuid_t sock_i_uid(struct sock *sk);
+unsigned long sock_i_ino(struct sock *sk);
+
+static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
+{
+	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
+}
+
+static inline u32 net_tx_rndhash(void)
+{
+	u32 v = prandom_u32();
+
+	return v ?: 1;
+}
+
+static inline void sk_set_txhash(struct sock *sk)
+{
+	sk->sk_txhash = net_tx_rndhash();
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+	if (sk->sk_txhash)
+		sk_set_txhash(sk);
+}
+
+static inline struct dst_entry *
+__sk_dst_get(struct sock *sk)
+{
+	return rcu_dereference_check(sk->sk_dst_cache,
+				     lockdep_sock_is_held(sk));
+}
+
+static inline struct dst_entry *
+sk_dst_get(struct sock *sk)
+{
+	struct dst_entry *dst;
+
+	rcu_read_lock();
+	dst = rcu_dereference(sk->sk_dst_cache);
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
+	rcu_read_unlock();
+	return dst;
+}
+static inline void dst_negative_advice(struct sock *sk)
+{
+	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+
+	sk_rethink_txhash(sk);
+
+	if (dst && dst->ops->negative_advice) {
+		ndst = dst->ops->negative_advice(dst);
+
+		if (ndst != dst) {
+			rcu_assign_pointer(sk->sk_dst_cache, ndst);
+			sk_tx_queue_clear(sk);
+			sk->sk_dst_pending_confirm = 0;
+		}
+	}
+}
+
+static inline void
+__sk_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+	struct dst_entry *old_dst;
+
+	sk_tx_queue_clear(sk);
+	sk->sk_dst_pending_confirm = 0;
+	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+					    lockdep_sock_is_held(sk));
+	rcu_assign_pointer(sk->sk_dst_cache, dst);
+	dst_release(old_dst);
+}
+
+static inline void
+sk_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+	struct dst_entry *old_dst;
+
+	sk_tx_queue_clear(sk);
+	sk->sk_dst_pending_confirm = 0;
+	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+	dst_release(old_dst);
+}
+
+static inline void
+__sk_dst_reset(struct sock *sk)
+{
+	__sk_dst_set(sk, NULL);
+}
+
+static inline void
+sk_dst_reset(struct sock *sk)
+{
+	sk_dst_set(sk, NULL);
+}
+
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+
+static inline void sk_dst_confirm(struct sock *sk)
+{
+	if (!READ_ONCE(sk->sk_dst_pending_confirm))
+		WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
+}
+
+static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
+{
+	if (skb_get_dst_pending_confirm(skb)) {
+		struct sock *sk = skb->sk;
+		unsigned long now = jiffies;
+
+		if (READ_ONCE(n->confirmed) != now)
+			WRITE_ONCE(n->confirmed, now);
+		if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
+			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
+	}
+}
+bool sk_mc_loop(struct sock *sk);
+
+static inline bool sk_can_gso(const struct sock *sk)
+{
+	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+}
+
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+
+static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
+{
+	sk->sk_route_nocaps |= flags;
+	sk->sk_route_caps &= ~flags;
+}
+
+static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
+					   struct iov_iter *from, char *to,
+					   int copy, int offset)
+{
+	if (skb->ip_summed == CHECKSUM_NONE) {
+		__wsum csum = 0;
+		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
+			return -EFAULT;
+		skb->csum = csum_block_add(skb->csum, csum, offset);
+	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
+		if (!copy_from_iter_full_nocache(to, copy, from))
+			return -EFAULT;
+	} else if (!copy_from_iter_full(to, copy, from))
+		return -EFAULT;
+
+	return 0;
+}
+
+static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
+				       struct iov_iter *from, int copy)
+{
+	int err, offset = skb->len;
+
+	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
+				       copy, offset);
+	if (err)
+		__skb_trim(skb, offset);
+
+	return err;
+}
+
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
+					   struct sk_buff *skb,
+					   struct page *page,
+					   int off, int copy)
+{
+	int err;
+
+	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
+				       copy, skb->len);
+	if (err)
+		return err;
+
+	skb->len      += copy;
+	skb->data_len += copy;
+	skb->truesize += copy;
+	sk_wmem_queued_add(sk, copy);
+	sk_mem_charge(sk, copy);
+	return 0;
+}
+
+static inline int sk_wmem_alloc_get(const struct sock *sk)
+{
+	return refcount_read(&sk->sk_wmem_alloc) - 1;
+}
+
+static inline int sk_rmem_alloc_get(const struct sock *sk)
+{
+	return atomic_read(&sk->sk_backlog.rmem_alloc);
+}
+
+static inline bool sk_has_allocations(const struct sock *sk)
+{
+	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
+}
+# 2141 "./include/net/sock.h"
+static inline bool skwq_has_sleeper(struct socket_wq *wq)
+{
+	return wq && wq_has_sleeper(&wq->wait);
+}
+# 2154 "./include/net/sock.h"
+static inline void sock_poll_wait(struct file *filp, struct socket *sock,
+				  poll_table *p)
+{
+	if (!poll_does_not_wait(p)) {
+		poll_wait(filp, &sock->wq.wait, p);
+
+		smp_mb();
+	}
+}
+
+static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
+{
+	if (sk->sk_txhash) {
+		skb->l4_hash = 1;
+		skb->hash = sk->sk_txhash;
+	}
+}
+
+void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
+# 2186 "./include/net/sock.h"
+static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rfree;
+	atomic_add(skb->truesize, &sk->sk_backlog.rmem_alloc);
+	sk_mem_charge(sk, skb->truesize);
+}
+
+void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+		    unsigned long expires);
+
+void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+
+int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
+			struct sk_buff *skb, unsigned int flags,
+			void (*destructor)(struct sock *sk,
+					   struct sk_buff *skb));
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
+
+static inline int sock_error(struct sock *sk)
+{
+	int err;
+	if (likely(!sk->sk_err))
+		return 0;
+	err = xchg(&sk->sk_err, 0);
+	return -err;
+}
+
+static inline unsigned long sock_wspace(struct sock *sk)
+{
+	int amt = 0;
+
+	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
+		if (amt < 0)
+			amt = 0;
+	}
+	return amt;
+}
+
+static inline void sk_set_bit(int nr, struct sock *sk)
+{
+	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+	    !sock_flag(sk, SOCK_FASYNC))
+		return;
+
+	set_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_clear_bit(int nr, struct sock *sk)
+{
+	if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+	    !sock_flag(sk, SOCK_FASYNC))
+		return;
+
+	clear_bit(nr, &sk->sk_wq_raw->flags);
+}
+
+static inline void sk_wake_async(const struct sock *sk, int how, int band)
+{
+	if (sock_flag(sk, SOCK_FASYNC)) {
+		rcu_read_lock();
+		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
+		rcu_read_unlock();
+	}
+}
+# 2276 "./include/net/sock.h"
+static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+{
+	u32 val;
+
+	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+		return;
+
+	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+
+	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
+}
+
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+				    bool force_schedule);
+# 2306 "./include/net/sock.h"
+static inline struct page_frag *sk_page_frag(struct sock *sk)
+{
+	if (gfpflags_normal_context(sk->sk_allocation))
+		return &current->task_frag;
+
+	return &sk->sk_frag;
+}
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+
+static inline bool sock_writeable(const struct sock *sk)
+{
+	return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
+}
+
+static inline gfp_t gfp_any(void)
+{
+	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
+{
+	return noblock ? 0 : sk->sk_rcvtimeo;
+}
+
+static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
+{
+	return noblock ? 0 : sk->sk_sndtimeo;
+}
+
+static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
+{
+	int v = waitall ?
len : __builtin_choose_expr(((!!(sizeof((typeof((int)(({ do { extern void __compiletime_assert_1542(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_1542(); } while (0); ({ typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) __x = (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); do { } while (0); (typeof(sk->sk_rcvlowat))__x; }); }))) *)1 == (typeof((int)(len)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((int)(({ do { extern void __compiletime_assert_1542(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_1542(); } while (0); ({ typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) __x = (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); do { } while (0); (typeof(sk->sk_rcvlowat))__x; }); }))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? 
((void *)((long)((int)(len)) * 0l)) : (int *)8))))), (((int)(({ do { extern void __compiletime_assert_1542(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_1542(); } while (0); ({ typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) __x = (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); do { } while (0); (typeof(sk->sk_rcvlowat))__x; }); }))) < ((int)(len)) ? ((int)(({ do { extern void __compiletime_assert_1542(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_1542(); } while (0); ({ typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) __x = (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); do { } while (0); (typeof(sk->sk_rcvlowat))__x; }); }))) : ((int)(len))), ({ typeof((int)(({ do { extern void __compiletime_assert_1542(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_1542(); } while (0); ({ typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long 
long)0, default: (sk->sk_rcvlowat))) __x = (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); do { } while (0); (typeof(sk->sk_rcvlowat))__x; }); }))) __UNIQUE_ID___x1543 = ((int)(({ do { extern void __compiletime_assert_1542(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_rcvlowat) == sizeof(char) || sizeof(sk->sk_rcvlowat) == sizeof(short) || sizeof(sk->sk_rcvlowat) == sizeof(int) || sizeof(sk->sk_rcvlowat) == sizeof(long)) || sizeof(sk->sk_rcvlowat) == sizeof(long long))) __compiletime_assert_1542(); } while (0); ({ typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) __x = (*(const volatile typeof( _Generic((sk->sk_rcvlowat), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_rcvlowat))) *)&(sk->sk_rcvlowat)); do { } while (0); (typeof(sk->sk_rcvlowat))__x; }); }))); typeof((int)(len)) __UNIQUE_ID___y1544 = ((int)(len)); ((__UNIQUE_ID___x1543) < (__UNIQUE_ID___y1544) ? (__UNIQUE_ID___x1543) : (__UNIQUE_ID___y1544)); })); + + return v ?: 1; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sock_intr_errno(long timeo) +{ + return timeo == ((long)(~0UL >> 1)) ? -512 : -4; +} + +struct sock_skb_cb { + u32 dropcount; +}; +# 2371 "./include/net/sock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) +{ + ((struct sock_skb_cb *)((skb)->cb + ((sizeof((((struct sk_buff *)0)->cb)) - sizeof(struct sock_skb_cb)))))->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ? + atomic_read(&sk->sk_drops) : 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_drops_add(struct sock *sk, const struct sk_buff *skb) +{ + int segs = __builtin_choose_expr(((!!(sizeof((typeof((u16)(1)) *)1 == (typeof((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((u16)(1)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) * 0l)) : (int *)8))))), (((u16)(1)) > ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) ? 
((u16)(1)) : ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs))), ({ typeof((u16)(1)) __UNIQUE_ID___x1545 = ((u16)(1)); typeof((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)) __UNIQUE_ID___y1546 = ((u16)(((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_segs)); ((__UNIQUE_ID___x1545) > (__UNIQUE_ID___y1546) ? (__UNIQUE_ID___x1545) : (__UNIQUE_ID___y1546)); })); + + atomic_add(segs, &sk->sk_drops); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) ktime_t sock_read_timestamp(struct sock *sk) +{ +# 2398 "./include/net/sock.h" + return ({ do { extern void __compiletime_assert_1547(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_stamp) == sizeof(char) || sizeof(sk->sk_stamp) == sizeof(short) || sizeof(sk->sk_stamp) == sizeof(int) || sizeof(sk->sk_stamp) == sizeof(long)) || sizeof(sk->sk_stamp) == sizeof(long long))) __compiletime_assert_1547(); } while (0); ({ typeof( _Generic((sk->sk_stamp), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_stamp))) __x = (*(const volatile typeof( _Generic((sk->sk_stamp), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_stamp))) *)&(sk->sk_stamp)); do { } while (0); (typeof(sk->sk_stamp))__x; }); }); + +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_write_timestamp(struct sock *sk, ktime_t kt) +{ + + + + + + do { do { extern void __compiletime_assert_1548(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_stamp) == sizeof(char) || sizeof(sk->sk_stamp) == sizeof(short) || sizeof(sk->sk_stamp) == sizeof(int) || sizeof(sk->sk_stamp) == sizeof(long)) || sizeof(sk->sk_stamp) == sizeof(long long))) __compiletime_assert_1548(); } while (0); do { *(volatile typeof(sk->sk_stamp) *)&(sk->sk_stamp) = (kt); } while (0); } while (0); + +} + +void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); +void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) +{ + ktime_t kt = skb->tstamp; + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + + + + + + + + if (sock_flag(sk, SOCK_RCVTSTAMP) || + (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || + (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || + (hwtstamps->hwtstamp && + (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) + __sock_recv_timestamp(msg, sk, skb); + else + sock_write_timestamp(sk, kt); + + if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) + __sock_recv_wifi_status(msg, sk, skb); +} + +void 
__sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb) +{ + + + + + + if (sk->__sk_common.skc_flags & ((1UL << SOCK_RXQ_OVFL) | (1UL << SOCK_RCVTSTAMP)) || sk->sk_tsflags & (SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE)) + __sock_recv_ts_and_drops(msg, sk, skb); + else if (__builtin_expect(!!(sock_flag(sk, SOCK_TIMESTAMP)), 0)) + sock_write_timestamp(sk, skb->tstamp); + else if (__builtin_expect(!!(sk->sk_stamp == (-1L * 1000000000L)), 0)) + sock_write_timestamp(sk, 0); +} + +void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags); +# 2474 "./include/net/sock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void _sock_tx_timestamp(struct sock *sk, __u16 tsflags, + __u8 *tx_flags, __u32 *tskey) +{ + if (__builtin_expect(!!(tsflags), 0)) { + __sock_tx_timestamp(tsflags, tx_flags); + if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey && + tsflags & (SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_TX_SCHED | SOF_TIMESTAMPING_TX_ACK)) + *tskey = sk->sk_tskey++; + } + if (__builtin_expect(!!(sock_flag(sk, SOCK_WIFI_STATUS)), 0)) + *tx_flags |= SKBTX_WIFI_STATUS; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sock_tx_timestamp(struct sock *sk, __u16 tsflags, + __u8 *tx_flags) +{ + _sock_tx_timestamp(sk, tsflags, tx_flags, ((void *)0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) +{ + _sock_tx_timestamp(skb->sk, tsflags, &((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags, + &((struct skb_shared_info *)(skb_end_pointer(skb)))->tskey); +} + +extern struct static_key_false tcp_rx_skb_cache_key; +# 2508 "./include/net/sock.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sk_eat_skb(struct sock *sk, struct sk_buff *skb) +{ + __skb_unlink(skb, &sk->sk_receive_queue); + if (({ bool branch; if (__builtin_types_compatible_p(typeof(*&tcp_rx_skb_cache_key), struct static_key_true)) branch = arch_static_branch_jump(&(&tcp_rx_skb_cache_key)->key, false); else if (__builtin_types_compatible_p(typeof(*&tcp_rx_skb_cache_key), struct static_key_false)) branch = arch_static_branch(&(&tcp_rx_skb_cache_key)->key, false); else branch = ____wrong_branch_error(); __builtin_expect(!!(branch), 0); }) && + !sk->sk_rx_skb_cache) { + sk->sk_rx_skb_cache = skb; + skb_orphan(skb); + return; + } + __kfree_skb(skb); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +struct net *sock_net(const struct sock *sk) +{ + return read_pnet(&sk->__sk_common.skc_net); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) +void sock_net_set(struct sock *sk, struct net *net) +{ + write_pnet(&sk->__sk_common.skc_net, net); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +skb_sk_is_prefetched(struct sk_buff *skb) +{ + + return skb->destructor == sock_pfree; + + + +} + + + + +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_fullsock(const struct sock *sk) +{ + return (1 << sk->__sk_common.skc_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +sk_is_refcounted(struct sock *sk) +{ + + return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sock * +skb_steal_sock(struct sk_buff *skb, bool *refcounted) +{ + if (skb->sk) { + struct sock *sk = skb->sk; + + *refcounted = true; + if (skb_sk_is_prefetched(skb)) + *refcounted = sk_is_refcounted(sk); + skb->destructor = ((void *)0); + skb->sk = ((void *)0); + return sk; + } + *refcounted = false; + return ((void *)0); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, + struct net_device *dev) +{ + + struct sock *sk = skb->sk; + + if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { + skb = sk->sk_validate_xmit_skb(sk, dev, skb); + + } else if (__builtin_expect(!!(skb->decrypted), 0)) { + ({ static struct ratelimit_state _rs = { .lock = (raw_spinlock_t) { .raw_lock = { { .val = { (0) } } }, .magic = 0xdead4ead, .owner_cpu = -1, .owner = ((void *)-1L), .dep_map = { .name = "_rs.lock", .wait_type_inner = LD_WAIT_SPIN, } }, .interval = (5 * 250), .burst = 10, }; if (___ratelimit(&_rs, __func__)) printk("\001" "4" "unencrypted skb with no associated socket - dropping\n"); }); + kfree_skb(skb); + skb = ((void *)0); + + } + + + return skb; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_listener(const struct sock *sk) +{ + return (1 << sk->__sk_common.skc_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); +} + +void sock_enable_timestamp(struct sock *sk, enum sock_flags flag); +int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, + int type); + +bool sk_ns_capable(const struct sock *sk, + struct user_namespace *user_ns, int cap); +bool sk_capable(const struct sock *sk, int cap); +bool sk_net_capable(const struct sock *sk, int cap); + +void sk_get_meminfo(const struct sock *sk, u32 *meminfo); +# 2632 "./include/net/sock.h" +extern __u32 sysctl_wmem_max; +extern __u32 sysctl_rmem_max; + +extern int sysctl_tstamp_allow_data; +extern int sysctl_optmem_max; + +extern __u32 sysctl_wmem_default; +extern __u32 sysctl_rmem_default; + +extern struct static_key_false net_high_order_alloc_disable_key; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_get_wmem0(const struct sock *sk, const struct proto *proto) +{ + + if (proto->sysctl_wmem_offset) + return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset); + + return *proto->sysctl_wmem; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int sk_get_rmem0(const struct sock *sk, const struct proto *proto) +{ + + if (proto->sysctl_rmem_offset) + return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset); + + return *proto->sysctl_rmem; +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
sk_pacing_shift_update(struct sock *sk, int val) +{ + if (!sk || !sk_fullsock(sk) || ({ do { extern void __compiletime_assert_1549(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_pacing_shift) == sizeof(char) || sizeof(sk->sk_pacing_shift) == sizeof(short) || sizeof(sk->sk_pacing_shift) == sizeof(int) || sizeof(sk->sk_pacing_shift) == sizeof(long)) || sizeof(sk->sk_pacing_shift) == sizeof(long long))) __compiletime_assert_1549(); } while (0); ({ typeof( _Generic((sk->sk_pacing_shift), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_pacing_shift))) __x = (*(const volatile typeof( _Generic((sk->sk_pacing_shift), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sk->sk_pacing_shift))) *)&(sk->sk_pacing_shift)); do { } while (0); (typeof(sk->sk_pacing_shift))__x; }); }) == val) + return; + do { do { extern void __compiletime_assert_1550(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sk->sk_pacing_shift) == sizeof(char) || sizeof(sk->sk_pacing_shift) == sizeof(short) || sizeof(sk->sk_pacing_shift) == sizeof(int) || sizeof(sk->sk_pacing_shift) == sizeof(long)) || sizeof(sk->sk_pacing_shift) == sizeof(long long))) __compiletime_assert_1550(); } while (0); do { *(volatile typeof(sk->sk_pacing_shift) *)&(sk->sk_pacing_shift) = (val); } while (0); } while (0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool sk_dev_equal_l3scope(struct sock *sk, int dif) +{ + int mdif; + + if (!sk->__sk_common.skc_bound_dev_if || sk->__sk_common.skc_bound_dev_if == dif) + return true; + + mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif); + if (mdif && mdif == sk->__sk_common.skc_bound_dev_if) + return true; + + return false; +} + +void sock_def_readable(struct sock *sk); + +int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk); +void sock_enable_timestamps(struct sock *sk); +void sock_no_linger(struct sock *sk); +void sock_set_keepalive(struct sock *sk); +void sock_set_priority(struct sock *sk, u32 priority); +void sock_set_rcvbuf(struct sock *sk, int val); +void sock_set_reuseaddr(struct sock *sk); +void sock_set_reuseport(struct sock *sk); +void sock_set_sndtimeo(struct sock *sk, s64 secs); + +int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len); +# 65 "fs/io_uring.c" 2 +# 1 "./include/net/af_unix.h" 1 + + + + + +# 1 "./include/uapi/linux/un.h" 1 +# 9 "./include/uapi/linux/un.h" +struct sockaddr_un { + __kernel_sa_family_t sun_family; + char sun_path[108]; +}; +# 7 "./include/net/af_unix.h" 2 + + + + +void unix_inflight(struct user_struct *user, struct file *fp); +void unix_notinflight(struct user_struct *user, struct file *fp); +void unix_destruct_scm(struct sk_buff *skb); +void unix_gc(void); +void wait_for_unix_gc(void); +struct sock *unix_get_socket(struct file *filp); 
+struct sock *unix_peer_get(struct sock *sk); + + + + +extern unsigned int unix_tot_inflight; +extern spinlock_t unix_table_lock; +extern struct hlist_head unix_socket_table[2 * 256]; + +struct unix_address { + refcount_t refcnt; + int len; + unsigned int hash; + struct sockaddr_un name[]; +}; + +struct unix_skb_parms { + struct pid *pid; + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; + + u32 secid; + + u32 consumed; +} __attribute__((__designated_init__)); + +struct scm_stat { + atomic_t nr_fds; +}; +# 57 "./include/net/af_unix.h" +struct unix_sock { + + struct sock sk; + struct unix_address *addr; + struct path path; + struct mutex iolock, bindlock; + struct sock *peer; + struct list_head link; + atomic_long_t inflight; + spinlock_t lock; + unsigned long gc_flags; + + + struct socket_wq peer_wq; + wait_queue_entry_t peer_wake; + struct scm_stat scm_stat; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct unix_sock *unix_sk(const struct sock *sk) +{ + return (struct unix_sock *)sk; +} + + + +long unix_inq_len(struct sock *sk); +long unix_outq_len(struct sock *sk); + + +int unix_sysctl_register(struct net *net); +void unix_sysctl_unregister(struct net *net); +# 66 "fs/io_uring.c" 2 + +# 1 "./include/linux/anon_inodes.h" 1 +# 12 "./include/linux/anon_inodes.h" +struct file_operations; + +struct file *anon_inode_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags); +int anon_inode_getfd(const char *name, const struct file_operations *fops, + void *priv, int flags); +# 68 "fs/io_uring.c" 2 +# 1 "./include/linux/sched/mm.h" 1 +# 10 "./include/linux/sched/mm.h" +# 1 "./include/linux/sync_core.h" 1 + + + + + +# 1 "./arch/x86/include/asm/sync_core.h" 1 +# 14 "./arch/x86/include/asm/sync_core.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void sync_core_before_usermode(void) +{ + + if (( __builtin_constant_p((__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { 
int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 7*32+11), (unsigned long *)((&boot_cpu_data)->x86_capability)))) ? (__builtin_constant_p(( 7*32+11)) && ( (((( 7*32+11))>>5)==(0) && (1UL<<((( 7*32+11))&31) & ((1<<(( 0*32+ 0) & 31))|0|(1<<(( 0*32+ 5) & 31))|(1<<(( 0*32+ 6) & 31))| (1<<(( 0*32+ 8) & 31))|0|(1<<(( 0*32+24) & 31))|(1<<(( 0*32+15) & 31))| (1<<(( 0*32+25) & 31))|(1<<(( 0*32+26) & 31))) )) || (((( 7*32+11))>>5)==(1) && (1UL<<((( 7*32+11))&31) & ((1<<(( 1*32+29) & 31))|0) )) || (((( 7*32+11))>>5)==(2) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(3) && (1UL<<((( 7*32+11))&31) & ((1<<(( 3*32+20) & 31))) )) || (((( 7*32+11))>>5)==(4) && (1UL<<((( 7*32+11))&31) & (0) )) || (((( 7*32+11))>>5)==(5) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(6) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(7) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(8) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(9) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(10) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(11) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(12) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(13) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(14) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(15) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(16) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(17) && (1UL<<((( 7*32+11))&31) & 0 )) || (((( 7*32+11))>>5)==(18) && (1UL<<((( 7*32+11))&31) & 0 )) || ((int)(sizeof(struct { int:(-!!(19 != 19)); }))) || ((int)(sizeof(struct { int:(-!!(19 != 19)); })))) ? 1 : test_bit(( 7*32+11), (unsigned long *)((&boot_cpu_data)->x86_capability))) : _static_cpu_has(( 7*32+11)) )) + return; + + + + + if (((preempt_count() & (((1UL << (4))-1) << ((0 + 8) + 8)))) || (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))) + return; + sync_core(); +} +# 7 "./include/linux/sync_core.h" 2 +# 11 "./include/linux/sched/mm.h" 2 + + + + +extern struct mm_struct *mm_alloc(void); +# 34 "./include/linux/sched/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mmgrab(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_count); +} + +extern void __mmdrop(struct mm_struct *mm); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mmdrop(struct mm_struct *mm) +{ + + + + + + if (__builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 0)) + __mmdrop(mm); +} + +void mmdrop(struct mm_struct *mm); +# 74 "./include/linux/sched/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mmget_still_valid(struct mm_struct *mm) +{ + return __builtin_expect(!!(!mm->core_state), 1); +} +# 95 "./include/linux/sched/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mmget(struct mm_struct *mm) +{ + atomic_inc(&mm->mm_users); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mmget_not_zero(struct mm_struct *mm) +{ + return atomic_inc_not_zero(&mm->mm_users); +} + + +extern void mmput(struct mm_struct *); + + + + +void mmput_async(struct mm_struct *); + + + +extern struct mm_struct *get_task_mm(struct task_struct *task); + + + + + +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int 
mode); + +extern void exit_mm_release(struct task_struct *, struct mm_struct *); + +extern void exec_mm_release(struct task_struct *, struct mm_struct *); + + +extern void mm_update_next_owner(struct mm_struct *mm); + + + + + + + +extern void arch_pick_mmap_layout(struct mm_struct *mm, + struct rlimit *rlim_stack); +extern unsigned long +arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned long); +extern unsigned long +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool in_vfork(struct task_struct *tsk) +{ + bool ret; +# 169 "./include/linux/sched/mm.h" + rcu_read_lock(); + ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm; + rcu_read_unlock(); + + return ret; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) gfp_t current_gfp_context(gfp_t flags) +{ + if (__builtin_expect(!!(get_current()->flags & (0x00080000 | 0x00040000 | 0x10000000)), 0) + ) { + + + + + if (get_current()->flags & 0x00080000) + flags &= ~((( gfp_t)0x40u) | (( gfp_t)0x80u)); + else if (get_current()->flags & 0x00040000) + flags &= ~(( gfp_t)0x80u); + + if (get_current()->flags & 0x10000000) + flags &= ~(( gfp_t)0x08u); + + } + return flags; +} + + +extern void __fs_reclaim_acquire(void); +extern void __fs_reclaim_release(void); +extern void fs_reclaim_acquire(gfp_t gfp_mask); +extern void fs_reclaim_release(gfp_t gfp_mask); +# 225 "./include/linux/sched/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int memalloc_noio_save(void) +{ + unsigned int flags = get_current()->flags & 0x00080000; + get_current()->flags |= 0x00080000; + return flags; +} +# 240 "./include/linux/sched/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memalloc_noio_restore(unsigned int flags) +{ + get_current()->flags = (get_current()->flags & ~0x00080000) | flags; +} +# 256 "./include/linux/sched/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int memalloc_nofs_save(void) +{ + unsigned int flags = get_current()->flags & 0x00040000; + get_current()->flags |= 0x00040000; + return flags; +} +# 271 "./include/linux/sched/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memalloc_nofs_restore(unsigned int flags) +{ + get_current()->flags = (get_current()->flags & ~0x00040000) | flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int memalloc_noreclaim_save(void) +{ + unsigned int flags = get_current()->flags & 0x00000800; + get_current()->flags |= 0x00000800; + return flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memalloc_noreclaim_restore(unsigned int flags) +{ + get_current()->flags = (get_current()->flags & ~0x00000800) | flags; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int memalloc_nocma_save(void) +{ + unsigned int flags = get_current()->flags & 
0x10000000; + + get_current()->flags |= 0x10000000; + return flags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memalloc_nocma_restore(unsigned int flags) +{ + get_current()->flags = (get_current()->flags & ~0x10000000) | flags; +} +# 323 "./include/linux/sched/mm.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memalloc_use_memcg(struct mem_cgroup *memcg) +{ + ({ int __ret_warn_on = !!(get_current()->active_memcg); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1551)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/sched/mm.h"), "i" (325), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1552)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1553)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + get_current()->active_memcg = memcg; +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void memalloc_unuse_memcg(void) +{ + get_current()->active_memcg = ((void *)0); +} +# 350 "./include/linux/sched/mm.h" +enum { + MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), + MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1), + MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2), + MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3), + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4), + MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5), +}; + +enum { + MEMBARRIER_FLAG_SYNC_CORE = (1U << 0), +}; + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) +{ + if (get_current()->mm != mm) + return; + if (__builtin_expect(!!(!(atomic_read(&mm->membarrier_state) & MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)), 1) + ) + return; + sync_core_before_usermode(); +} + +extern void membarrier_exec_mmap(struct mm_struct *mm); +# 69 "fs/io_uring.c" 2 + + + +# 1 "./include/linux/hugetlb.h" 1 +# 14 "./include/linux/hugetlb.h" +struct ctl_table; +struct user_struct; +struct mmu_gather; + + +typedef struct { unsigned long pd; } hugepd_t; + + + + + + +# 1 "./include/linux/mempolicy.h" 1 +# 11 "./include/linux/mempolicy.h" +# 1 "./include/linux/dax.h" 1 +# 12 "./include/linux/dax.h" +typedef unsigned long dax_entry_t; + +struct iomap_ops; +struct iomap; +struct dax_device; +struct dax_operations { + + + + + + long (*direct_access)(struct dax_device *, unsigned long, long, + void **, pfn_t *); + + + + + bool (*dax_supported)(struct dax_device *, struct block_device *, int, + sector_t, sector_t); + + size_t (*copy_from_iter)(struct dax_device *, unsigned long, void *, size_t, + struct iov_iter *); + + size_t (*copy_to_iter)(struct dax_device *, unsigned long, void *, size_t, + struct iov_iter *); + + int 
(*zero_page_range)(struct dax_device *, unsigned long, size_t); +}; + +extern struct attribute_group dax_attribute_group; + + +struct dax_device *dax_get_by_host(const char *host); +struct dax_device *alloc_dax(void *private, const char *host, + const struct dax_operations *ops, unsigned long flags); +void put_dax(struct dax_device *dax_dev); +void kill_dax(struct dax_device *dax_dev); +void dax_write_cache(struct dax_device *dax_dev, bool wc); +bool dax_write_cache_enabled(struct dax_device *dax_dev); +bool __dax_synchronous(struct dax_device *dax_dev); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dax_synchronous(struct dax_device *dax_dev) +{ + return __dax_synchronous(dax_dev); +} +void __set_dax_synchronous(struct dax_device *dax_dev); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_dax_synchronous(struct dax_device *dax_dev) +{ + __set_dax_synchronous(dax_dev); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool daxdev_mapping_supported(struct vm_area_struct *vma, + struct dax_device *dax_dev) +{ + if (!(vma->vm_flags & 0x00800000)) + return true; + if (!((file_inode(vma->vm_file))->i_flags & 8192)) + return false; + return dax_synchronous(dax_dev); +} +# 114 "./include/linux/dax.h" +struct writeback_control; +int bdev_dax_pgoff(struct block_device *, sector_t, size_t, unsigned long *pgoff); + +bool __bdev_dax_supported(struct block_device *bdev, int blocksize); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool bdev_dax_supported(struct block_device *bdev, int blocksize) +{ + return __bdev_dax_supported(bdev, blocksize); +} + +bool __generic_fsdax_supported(struct dax_device *dax_dev, + struct block_device *bdev, int blocksize, sector_t start, + sector_t sectors); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool generic_fsdax_supported(struct dax_device *dax_dev, + struct block_device *bdev, int blocksize, sector_t start, + sector_t sectors) +{ + return __generic_fsdax_supported(dax_dev, bdev, blocksize, start, + sectors); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fs_put_dax(struct dax_device *dax_dev) +{ + put_dax(dax_dev); +} + +struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); +int dax_writeback_mapping_range(struct address_space *mapping, + struct dax_device *dax_dev, struct writeback_control *wbc); + +struct page *dax_layout_busy_page(struct address_space *mapping); +dax_entry_t dax_lock_page(struct page *page); +void dax_unlock_page(struct page *page, dax_entry_t cookie); +# 192 "./include/linux/dax.h" +int dax_read_lock(void); +void dax_read_unlock(int id); +bool dax_alive(struct dax_device *dax_dev); +void *dax_get_private(struct dax_device *dax_dev); +long dax_direct_access(struct dax_device *dax_dev, unsigned long pgoff, long nr_pages, + void **kaddr, pfn_t *pfn); +bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, + int blocksize, sector_t start, sector_t len); +size_t dax_copy_from_iter(struct dax_device *dax_dev, unsigned long pgoff, void *addr, + size_t bytes, struct iov_iter *i); +size_t dax_copy_to_iter(struct dax_device *dax_dev, unsigned long pgoff, void *addr, + size_t bytes, struct 
iov_iter *i); +int dax_zero_page_range(struct dax_device *dax_dev, unsigned long pgoff, + size_t nr_pages); +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); + +ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, + const struct iomap_ops *ops); +vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, + pfn_t *pfnp, int *errp, const struct iomap_ops *ops); +vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, + enum page_entry_size pe_size, pfn_t pfn); +int dax_delete_mapping_entry(struct address_space *mapping, unsigned long index); +int dax_invalidate_mapping_entry_sync(struct address_space *mapping, + unsigned long index); +int dax_iomap_zero(loff_t pos, unsigned offset, unsigned size, + struct iomap *iomap); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool dax_mapping(struct address_space *mapping) +{ + return mapping->host && ((mapping->host)->i_flags & 8192); +} +# 12 "./include/linux/mempolicy.h" 2 + + + + + +# 1 "./include/uapi/linux/mempolicy.h" 1 +# 19 "./include/uapi/linux/mempolicy.h" +enum { + MPOL_DEFAULT, + MPOL_PREFERRED, + MPOL_BIND, + MPOL_INTERLEAVE, + MPOL_LOCAL, + MPOL_MAX, +}; +# 18 "./include/linux/mempolicy.h" 2 + +struct mm_struct; +# 45 "./include/linux/mempolicy.h" +struct mempolicy { + atomic_t refcnt; + unsigned short mode; + unsigned short flags; + union { + short preferred_node; + nodemask_t nodes; + + } v; + union { + nodemask_t cpuset_mems_allowed; + nodemask_t user_nodemask; + } w; +}; + + + + + + +extern void __mpol_put(struct mempolicy *pol); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mpol_put(struct mempolicy *pol) +{ + if (pol) + __mpol_put(pol); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int mpol_needs_cond_ref(struct mempolicy *pol) +{ + return (pol && (pol->flags & (1 << 0))); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mpol_cond_put(struct mempolicy *pol) +{ + if (mpol_needs_cond_ref(pol)) + __mpol_put(pol); +} + +extern struct mempolicy *__mpol_dup(struct mempolicy *pol); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct mempolicy *mpol_dup(struct mempolicy *pol) +{ + if (pol) + pol = __mpol_dup(pol); + return pol; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void mpol_get(struct mempolicy *pol) +{ + if (pol) + atomic_inc(&pol->refcnt); +} + +extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b); +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool mpol_equal(struct mempolicy *a, struct mempolicy *b) +{ + if (a == b) + return true; + return __mpol_equal(a, b); +} +# 119 "./include/linux/mempolicy.h" +struct sp_node { + struct rb_node nd; + unsigned long start, end; + struct mempolicy *policy; +}; + +struct shared_policy { + struct rb_root root; + rwlock_t lock; +}; + +int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst); +void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); +int mpol_set_shared_policy(struct shared_policy *info, + struct vm_area_struct *vma, + struct mempolicy *new); +void 
mpol_free_shared_policy(struct shared_policy *p); +struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, + unsigned long idx); + +struct mempolicy *get_task_policy(struct task_struct *p); +struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, + unsigned long addr); +bool vma_policy_mof(struct vm_area_struct *vma); + +extern void numa_default_policy(void); +extern void numa_policy_init(void); +extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new); +extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new); + +extern int huge_node(struct vm_area_struct *vma, + unsigned long addr, gfp_t gfp_flags, + struct mempolicy **mpol, nodemask_t **nodemask); +extern bool init_nodemask_of_mempolicy(nodemask_t *mask); +extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, + const nodemask_t *mask); +extern unsigned int mempolicy_slab_node(void); + +extern enum zone_type policy_zone; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_highest_zone(enum zone_type k) +{ + if (k > policy_zone && k != ZONE_MOVABLE) + policy_zone = k; +} + +int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, + const nodemask_t *to, int flags); + + + +extern int mpol_parse_str(char *str, struct mempolicy **mpol); + + +extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); + + +extern bool vma_migratable(struct vm_area_struct *vma); + +extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); +extern void mpol_put_task_policy(struct task_struct *); +# 27 "./include/linux/hugetlb.h" 2 + +# 1 "./arch/x86/include/asm/tlbflush.h" 1 +# 12 "./arch/x86/include/asm/tlbflush.h" +# 1 "./arch/x86/include/asm/invpcid.h" 1 + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __invpcid(unsigned long pcid, unsigned long addr, + unsigned long type) +{ + struct { u64 d[2]; } desc = { { pcid, addr } }; + + + + + + + + asm volatile("invpcid %[desc], %[type]" + :: [desc] "m" (desc), [type] "r" (type) : "memory"); +} + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void invpcid_flush_one(unsigned long pcid, + unsigned long addr) +{ + __invpcid(pcid, addr, 0); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void invpcid_flush_single_context(unsigned long pcid) +{ + __invpcid(pcid, 0, 1); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void invpcid_flush_all(void) +{ + __invpcid(0, 0, 2); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void invpcid_flush_all_nonglobals(void) +{ + __invpcid(0, 0, 3); +} +# 13 "./arch/x86/include/asm/tlbflush.h" 2 +# 1 "./arch/x86/include/asm/pti.h" 1 + + + + + + +extern void pti_init(void); +extern void pti_check_boottime_disable(void); +extern void pti_finalize(void); +# 14 "./arch/x86/include/asm/tlbflush.h" 2 + + +void __flush_tlb_all(void); + + + +void cr4_update_irqsoff(unsigned long set, unsigned long clear); +unsigned long cr4_read_shadow(void); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cr4_set_bits_irqsoff(unsigned long mask) +{ + cr4_update_irqsoff(mask, 0); +} 
+ + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cr4_clear_bits_irqsoff(unsigned long mask) +{ + cr4_update_irqsoff(0, mask); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cr4_set_bits(unsigned long mask) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + cr4_set_bits_irqsoff(mask); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cr4_clear_bits(unsigned long mask) +{ + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); + cr4_clear_bits_irqsoff(mask); + do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); +} +# 62 "./arch/x86/include/asm/tlbflush.h" +struct tlb_context { + u64 ctx_id; + u64 tlb_gen; +}; + +struct tlb_state { +# 79 "./arch/x86/include/asm/tlbflush.h" + struct mm_struct *loaded_mm; + + + + + union { + struct mm_struct *last_user_mm; + unsigned long last_user_mm_ibpb; + }; + + u16 loaded_mm_asid; + u16 next_asid; +# 107 "./arch/x86/include/asm/tlbflush.h" + bool is_lazy; +# 118 "./arch/x86/include/asm/tlbflush.h" + bool invalidate_other; + + + + + + + unsigned short user_pcid_flush_mask; + + + + + + unsigned long cr4; +# 152 "./arch/x86/include/asm/tlbflush.h" + struct tlb_context ctxs[6]; +}; +extern __attribute__((section(".discard"), unused)) char __pcpu_scope_cpu_tlbstate; extern __attribute__((section(".data..percpu" "..shared_aligned"))) __typeof__(struct tlb_state) cpu_tlbstate __attribute__((__aligned__((1 << (6))))); + +bool nmi_uaccess_okay(void); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void cr4_init_shadow(void) +{ + do { do { const void *__vpp_verify = (typeof((&(cpu_tlbstate.cr4)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_tlbstate.cr4)) { case 1: do { typedef typeof((cpu_tlbstate.cr4)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (__read_cr4()); (void)pto_tmp__; } switch (sizeof((cpu_tlbstate.cr4))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "qi" ((pto_T__)(__read_cr4()))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "ri" 
((pto_T__)(__read_cr4()))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "ri" ((pto_T__)(__read_cr4()))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "re" ((pto_T__)(__read_cr4()))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((cpu_tlbstate.cr4)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (__read_cr4()); (void)pto_tmp__; } switch (sizeof((cpu_tlbstate.cr4))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "qi" ((pto_T__)(__read_cr4()))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "ri" ((pto_T__)(__read_cr4()))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "ri" ((pto_T__)(__read_cr4()))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "re" ((pto_T__)(__read_cr4()))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((cpu_tlbstate.cr4)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (__read_cr4()); (void)pto_tmp__; } switch (sizeof((cpu_tlbstate.cr4))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "qi" ((pto_T__)(__read_cr4()))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "ri" ((pto_T__)(__read_cr4()))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "ri" ((pto_T__)(__read_cr4()))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "re" ((pto_T__)(__read_cr4()))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { typedef typeof((cpu_tlbstate.cr4)) pto_T__; if (0) { pto_T__ pto_tmp__; pto_tmp__ = (__read_cr4()); (void)pto_tmp__; } switch (sizeof((cpu_tlbstate.cr4))) { case 1: asm volatile ("mov" "b %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "qi" ((pto_T__)(__read_cr4()))); break; case 2: asm volatile ("mov" "w %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "ri" ((pto_T__)(__read_cr4()))); break; case 4: asm volatile ("mov" "l %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "ri" ((pto_T__)(__read_cr4()))); break; case 8: asm volatile ("mov" "q %1,""%%""gs"":" "%" "0" : "+m" ((cpu_tlbstate.cr4)) : "re" ((pto_T__)(__read_cr4()))); break; default: __bad_percpu_size(); } } while (0);break; default: __bad_size_call_parameter();break; } } while (0); +} + +extern unsigned long mmu_cr4_features; +extern u32 *trampoline_cr4_features; + +extern void initialize_tlbstate_and_flush(void); +# 183 "./arch/x86/include/asm/tlbflush.h" +struct flush_tlb_info { +# 200 "./arch/x86/include/asm/tlbflush.h" + struct mm_struct *mm; + unsigned long start; + unsigned long end; + u64 new_tlb_gen; + unsigned int stride_shift; + bool freed_tables; +}; + +void flush_tlb_local(void); +void flush_tlb_one_user(unsigned long addr); +void flush_tlb_one_kernel(unsigned long addr); +void flush_tlb_others(const struct cpumask *cpumask, + const struct flush_tlb_info *info); +# 227 "./arch/x86/include/asm/tlbflush.h" +extern void flush_tlb_all(void); +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, + unsigned long end, unsigned int stride_shift, + bool freed_tables); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void flush_tlb_page(struct vm_area_struct *vma, unsigned long a) +{ + flush_tlb_mm_range(vma->vm_mm, a, a + ((1UL) << 12), 12, false); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) u64 inc_mm_tlb_gen(struct mm_struct *mm) +{ + + + + + + + return atomic64_inc_return(&mm->context.tlb_gen); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch, + struct mm_struct *mm) +{ + inc_mm_tlb_gen(mm); + cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); +} + +extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); +# 29 "./include/linux/hugetlb.h" 2 + +struct hugepage_subpool { + spinlock_t lock; + long count; + long max_hpages; + long used_hpages; + + struct hstate *hstate; + long min_hpages; + long rsv_hpages; + +}; + +struct resv_map { + struct kref refs; + spinlock_t lock; + struct list_head regions; + long adds_in_progress; + struct list_head region_cache; + long region_cache_count; + + + + + + + struct page_counter *reservation_counter; + unsigned long pages_per_hpage; + struct cgroup_subsys_state *css; + +}; +# 80 "./include/linux/hugetlb.h" +struct file_region { + struct list_head link; + long from; + long to; + + + + + + + struct page_counter *reservation_counter; + struct cgroup_subsys_state *css; + +}; + +extern struct resv_map *resv_map_alloc(void); +void resv_map_release(struct kref *ref); + +extern spinlock_t hugetlb_lock; +extern int hugetlb_max_hstate __attribute__((__section__(".data..read_mostly"))); + + + +struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, + long min_hpages); +void hugepage_put_subpool(struct hugepage_subpool *spool); + +void reset_vma_resv_huge_pages(struct vm_area_struct *vma); +int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); +int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); + +int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); +long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, + struct page **, struct vm_area_struct **, + unsigned long *, unsigned long *, long, unsigned int, + int *); +void unmap_hugepage_range(struct vm_area_struct *, + unsigned long, unsigned long, struct page *); +void __unmap_hugepage_range_final(struct mmu_gather *tlb, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct page *ref_page); +void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, + unsigned long start, unsigned long end, + struct page *ref_page); +void hugetlb_report_meminfo(struct seq_file *); +int hugetlb_report_node_meminfo(int, char *); +void hugetlb_show_meminfo(void); +unsigned long hugetlb_total_pages(void); +vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, unsigned int flags); +int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, + struct vm_area_struct *dst_vma, + unsigned long dst_addr, + unsigned long src_addr, + struct page **pagep); +int hugetlb_reserve_pages(struct inode *inode, long from, long to, + struct vm_area_struct *vma, + 
vm_flags_t vm_flags); +long hugetlb_unreserve_pages(struct inode *inode, long start, long end, + long freed); +bool isolate_huge_page(struct page *page, struct list_head *list); +void putback_active_hugepage(struct page *page); +void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); +void free_huge_page(struct page *page); +void hugetlb_fix_reserve_counts(struct inode *inode); +extern struct mutex *hugetlb_fault_mutex_table; +u32 hugetlb_fault_mutex_hash(struct address_space *mapping, unsigned long idx); + +pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); + +struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); + +extern int sysctl_hugetlb_shm_group; +extern struct list_head huge_boot_pages; + + + +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz); +pte_t *huge_pte_offset(struct mm_struct *mm, + unsigned long addr, unsigned long sz); +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, + unsigned long *start, unsigned long *end); +struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, + int write); +struct page *follow_huge_pd(struct vm_area_struct *vma, + unsigned long address, hugepd_t hpd, + int flags, int pdshift); +struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int flags); +struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, + pud_t *pud, int flags); +struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address, + pgd_t *pgd, int flags); + +int pmd_huge(pmd_t pmd); +int pud_huge(pud_t pud); +unsigned long hugetlb_change_protection(struct vm_area_struct *vma, + unsigned long address, unsigned long end, pgprot_t newprot); + +bool is_hugetlb_entry_migration(pte_t pte); +# 381 "./include/linux/hugetlb.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int pgd_write(pgd_t pgd) +{ + do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1554)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/hugetlb.h"), "i" (383), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1555)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); + return 0; +} + + + + +enum { + + + + + HUGETLB_SHMFS_INODE = 1, + + + + + HUGETLB_ANONHUGE_INODE = 2, +}; + + +struct hugetlbfs_sb_info { + long max_inodes; + long free_inodes; + spinlock_t stat_lock; + struct hstate *hstate; + struct hugepage_subpool *spool; + kuid_t uid; + kgid_t gid; + umode_t mode; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + +struct hugetlbfs_inode_info { + struct shared_policy policy; + struct inode vfs_inode; + unsigned int seals; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) +{ + return ({ void *__mptr = (void *)(inode); do { extern void __compiletime_assert_1556(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(inode)), typeof(((struct hugetlbfs_inode_info *)0)->vfs_inode)) && !__builtin_types_compatible_p(typeof(*(inode)), typeof(void))))) __compiletime_assert_1556(); } while (0); ((struct hugetlbfs_inode_info *)(__mptr - __builtin_offsetof(struct hugetlbfs_inode_info, vfs_inode))); }); +} + +extern const struct file_operations hugetlbfs_file_operations; +extern const struct vm_operations_struct hugetlb_vm_ops; +struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, + struct user_struct **user, int creat_flags, + int page_size_log); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool is_file_hugepages(struct file *file) +{ + if (file->f_op == &hugetlbfs_file_operations) + return true; + + return is_file_shm_hugepages(file); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hstate *hstate_inode(struct inode *i) +{ + return HUGETLBFS_SB(i->i_sb)->hstate; +} +# 467 "./include/linux/hugetlb.h" +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); + + + + + + +struct hstate { + int next_nid_to_alloc; + int next_nid_to_free; + unsigned int order; + unsigned long mask; + unsigned long max_huge_pages; + unsigned long nr_huge_pages; + unsigned long free_huge_pages; + unsigned long resv_huge_pages; + unsigned long surplus_huge_pages; + unsigned long nr_overcommit_huge_pages; + struct list_head hugepage_activelist; + struct list_head hugepage_freelists[(1 << 10)]; + unsigned int nr_huge_pages_node[(1 << 10)]; + unsigned int free_huge_pages_node[(1 << 10)]; + unsigned int surplus_huge_pages_node[(1 << 10)]; + + + struct cftype cgroup_files_dfl[7]; + struct cftype cgroup_files_legacy[9]; + + char name[32]; +}; + +struct huge_bootmem_page { + struct list_head list; + struct hstate *hstate; +}; + +struct page *alloc_huge_page(struct vm_area_struct *vma, + unsigned long addr, int avoid_reserve); +struct page *alloc_huge_page_node(struct hstate *h, int nid); +struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, + nodemask_t *nmask); +struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, + unsigned long address); +struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, + int nid, nodemask_t *nmask); +int huge_add_to_page_cache(struct page *page, struct address_space *mapping, + unsigned long idx); + + +int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) __alloc_bootmem_huge_page(struct hstate *h); +int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) alloc_bootmem_huge_page(struct hstate *h); + +void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) hugetlb_add_hstate(unsigned order); +bool __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) arch_hugetlb_valid_size(unsigned long size); +struct hstate *size_to_hstate(unsigned long 
size); + + + + + +extern struct hstate hstates[2]; +extern unsigned int default_hstate_idx; + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hstate *hstate_file(struct file *f) +{ + return hstate_inode(file_inode(f)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hstate *hstate_sizelog(int page_size_log) +{ + if (!page_size_log) + return &(hstates[default_hstate_idx]); + + return size_to_hstate(1UL << page_size_log); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hstate *hstate_vma(struct vm_area_struct *vma) +{ + return hstate_file(vma->vm_file); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long huge_page_size(struct hstate *h) +{ + return (unsigned long)((1UL) << 12) << h->order; +} + +extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); + +extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long huge_page_mask(struct hstate *h) +{ + return h->mask; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int huge_page_order(struct hstate *h) +{ + return h->order; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned huge_page_shift(struct hstate *h) +{ + return h->order + 12; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hstate_is_gigantic(struct hstate *h) +{ + return huge_page_order(h) >= 11; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1 << h->order; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int blocks_per_huge_page(struct hstate *h) +{ + return huge_page_size(h) / 512; +} + +# 1 "./arch/x86/include/asm/hugetlb.h" 1 + + + + + +# 1 "./include/asm-generic/hugetlb.h" 1 + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) +{ + return pfn_pte((unsigned long)((page) - ((struct page *)vmemmap_base)), (pgprot)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long huge_pte_write(pte_t pte) +{ + return pte_write(pte); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long huge_pte_dirty(pte_t pte) +{ + return pte_dirty(pte); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t huge_pte_mkwrite(pte_t pte) +{ + return pte_mkwrite(pte); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t huge_pte_mkdirty(pte_t pte) +{ + return pte_mkdirty(pte); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) +{ + return pte_modify(pte, newprot); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + pte_clear(mm, addr, ptep); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return ptep_get_and_clear(mm, addr, ptep); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + ptep_clear_flush(vma, addr, ptep); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) +{ + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) + return -22; + if (addr & ~huge_page_mask(h)) + return -22; + + return 0; +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t huge_ptep_get(pte_t *ptep) +{ + return ptep_get(ptep); +} + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool gigantic_page_runtime_supported(void) +{ + return 1; +} +# 7 "./arch/x86/include/asm/hugetlb.h" 2 +# 592 "./include/linux/hugetlb.h" 2 + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, unsigned long len) +{ + return 0; +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void 
arch_clear_hugepage_flags(struct page *page) { } + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, + struct page *page, int writable) +{ + return entry; +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct hstate *page_hstate(struct page *page) +{ + do { if (__builtin_expect(!!(!PageHuge(page)), 0)) { dump_page(page, "VM_BUG_ON_PAGE(" "!PageHuge(page)"")"); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1557)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/hugetlb.h"), "i" (617), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1558)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } } while (0); + return size_to_hstate(page_size(page)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned hstate_index_to_shift(unsigned index) +{ + return hstates[index].order + 12; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int hstate_index(struct hstate *h) +{ + return h - hstates; +} + +unsigned long __basepage_index(struct page *page); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned long basepage_index(struct page *page) +{ + if (!PageCompound(page)) + return page->index; + + return __basepage_index(page); +} + +extern int dissolve_free_huge_page(struct page *page); +extern int dissolve_free_huge_pages(unsigned long start_pfn, + unsigned long end_pfn); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool arch_hugetlb_migration_supported(struct hstate *h) +{ + if ((huge_page_shift(h) == 21) || + (huge_page_shift(h) == 30) || + (huge_page_shift(h) == pgdir_shift)) + return true; + else + return false; +} +# 665 "./include/linux/hugetlb.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hugepage_migration_supported(struct hstate *h) +{ + return arch_hugetlb_migration_supported(h); +} +# 685 "./include/linux/hugetlb.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool hugepage_movable_supported(struct hstate *h) +{ + if (!hugepage_migration_supported(h)) + return false; + + if (hstate_is_gigantic(h)) + return false; + return true; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *huge_pte_lockptr(struct hstate *h, + struct mm_struct *mm, pte_t *pte) +{ + if (huge_page_size(h) == ((1UL) << 21)) + return pmd_lockptr(mm, (pmd_t *) pte); + do { if (__builtin_expect(!!(huge_page_size(h) == ((1UL) << 12)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" 
".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1559)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/hugetlb.h"), "i" (700), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1560)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); + return &mm->page_table_lock; +} +# 713 "./include/linux/hugetlb.h" +void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hugetlb_count_add(long l, struct mm_struct *mm) +{ + atomic_long_add(l, &mm->hugetlb_usage); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void hugetlb_count_sub(long l, struct mm_struct *mm) +{ + atomic_long_sub(l, &mm->hugetlb_usage); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + set_huge_pte_at(mm, addr, ptep, pte); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t old_pte, pte_t pte) +{ + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); +} +# 901 "./include/linux/hugetlb.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) spinlock_t *huge_pte_lock(struct hstate *h, + struct mm_struct *mm, pte_t *pte) +{ + spinlock_t *ptl; + + ptl = huge_pte_lockptr(h, mm, pte); + spin_lock(ptl); + return ptl; +} + + +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) hugetlb_cma_reserve(int order); +extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) hugetlb_cma_check(void); +# 73 "fs/io_uring.c" 2 + +# 1 "./include/linux/namei.h" 1 +# 11 "./include/linux/namei.h" +enum { MAX_NESTED_LINKS = 8 }; + + + + + + +enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; +# 52 "./include/linux/namei.h" +extern int path_pts(struct path *path); + +extern int user_path_at_empty(int, const char *, unsigned, struct path *, int *empty); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int user_path_at(int dfd, const char *name, unsigned flags, + struct path *path) +{ + return user_path_at_empty(dfd, name, flags, path, ((void *)0)); +} + +extern int kern_path(const char *, unsigned, struct path *); + +extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int); +extern struct 
dentry *user_path_create(int, const char *, struct path *, unsigned int); +extern void done_path_create(struct path *, struct dentry *); +extern struct dentry *kern_path_locked(const char *, struct path *); + +extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); +extern struct dentry *lookup_one_len(const char *, struct dentry *, int); +extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); +extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int); + +extern int follow_down_one(struct path *); +extern int follow_down(struct path *); +extern int follow_up(struct path *); + +extern struct dentry *lock_rename(struct dentry *, struct dentry *); +extern void unlock_rename(struct dentry *, struct dentry *); + +extern int __attribute__((__warn_unused_result__)) nd_jump_link(struct path *path); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void nd_terminate_link(void *name, size_t len, size_t maxlen) +{ + ((char *) name)[__builtin_choose_expr(((!!(sizeof((typeof(len) *)1 == (typeof(maxlen) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(len) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(maxlen) * 0l)) : (int *)8))))), ((len) < (maxlen) ? (len) : (maxlen)), ({ typeof(len) __UNIQUE_ID___x1561 = (len); typeof(maxlen) __UNIQUE_ID___y1562 = (maxlen); ((__UNIQUE_ID___x1561) < (__UNIQUE_ID___y1562) ? (__UNIQUE_ID___x1561) : (__UNIQUE_ID___y1562)); }))] = '\0'; +} +# 98 "./include/linux/namei.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool +retry_estale(const long error, const unsigned int flags) +{ + return error == -116 && !(flags & 0x0020); +} +# 75 "fs/io_uring.c" 2 +# 1 "./include/linux/fsnotify.h" 1 +# 15 "./include/linux/fsnotify.h" +# 1 "./include/linux/fsnotify_backend.h" 1 +# 97 "./include/linux/fsnotify_backend.h" +struct fsnotify_group; +struct fsnotify_event; +struct fsnotify_mark; +struct fsnotify_event_private_data; +struct fsnotify_fname; +struct fsnotify_iter_info; + +struct mem_cgroup; +# 117 "./include/linux/fsnotify_backend.h" +struct fsnotify_ops { + int (*handle_event)(struct fsnotify_group *group, + struct inode *inode, + u32 mask, const void *data, int data_type, + const struct qstr *file_name, u32 cookie, + struct fsnotify_iter_info *iter_info); + void (*free_group_priv)(struct fsnotify_group *group); + void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); + void (*free_event)(struct fsnotify_event *event); + + void (*free_mark)(struct fsnotify_mark *mark); +}; + + + + + + +struct fsnotify_event { + struct list_head list; + unsigned long objectid; +}; + + + + + + + +struct fsnotify_group { + const struct fsnotify_ops *ops; +# 157 "./include/linux/fsnotify_backend.h" + refcount_t refcnt; + + + spinlock_t notification_lock; + struct list_head notification_list; + wait_queue_head_t notification_waitq; + unsigned int q_len; + unsigned int max_events; + + + + + + + + unsigned int priority; + bool shutdown; + + + struct mutex mark_mutex; + atomic_t num_marks; + + + atomic_t user_waits; + + struct list_head marks_list; + + struct fasync_struct *fsn_fa; + + struct fsnotify_event *overflow_event; + + + + struct mem_cgroup *memcg; + + + union { + void *private; + + struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + struct ucounts *ucounts; + } inotify_data; + + + struct 
fanotify_group_private_data { + + struct list_head access_list; + wait_queue_head_t access_waitq; + int flags; + int f_flags; + unsigned int max_marks; + struct user_struct *user; + } fanotify_data; + + }; +}; + + +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE, + FSNOTIFY_EVENT_PATH, + FSNOTIFY_EVENT_INODE, +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct inode *fsnotify_data_inode(const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_INODE: + return data; + case FSNOTIFY_EVENT_PATH: + return d_inode(((const struct path *)data)->dentry); + default: + return ((void *)0); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) const struct path *fsnotify_data_path(const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_PATH: + return data; + default: + return ((void *)0); + } +} + +enum fsnotify_obj_type { + FSNOTIFY_OBJ_TYPE_INODE, + FSNOTIFY_OBJ_TYPE_VFSMOUNT, + FSNOTIFY_OBJ_TYPE_SB, + FSNOTIFY_OBJ_TYPE_COUNT, + FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT +}; + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fsnotify_valid_obj_type(unsigned int type) +{ + return (type < FSNOTIFY_OBJ_TYPE_COUNT); +} + +struct fsnotify_iter_info { + struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT]; + unsigned int report_mask; + int srcu_idx; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool fsnotify_iter_should_report_type( + struct fsnotify_iter_info *iter_info, int type) +{ + return (iter_info->report_mask & (1U << type)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_iter_set_report_type( + struct fsnotify_iter_info *iter_info, int type) +{ + iter_info->report_mask |= (1U << type); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_iter_set_report_type_mark( + struct fsnotify_iter_info *iter_info, int type, + struct fsnotify_mark *mark) +{ + iter_info->marks[type] = mark; + iter_info->report_mask |= (1U << type); +} +# 299 "./include/linux/fsnotify_backend.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct fsnotify_mark *fsnotify_iter_inode_mark( struct fsnotify_iter_info *iter_info) { return (iter_info->report_mask & (1U << FSNOTIFY_OBJ_TYPE_INODE)) ? iter_info->marks[FSNOTIFY_OBJ_TYPE_INODE] : ((void *)0); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct fsnotify_mark *fsnotify_iter_vfsmount_mark( struct fsnotify_iter_info *iter_info) { return (iter_info->report_mask & (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)) ? iter_info->marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] : ((void *)0); } +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct fsnotify_mark *fsnotify_iter_sb_mark( struct fsnotify_iter_info *iter_info) { return (iter_info->report_mask & (1U << FSNOTIFY_OBJ_TYPE_SB)) ? 
iter_info->marks[FSNOTIFY_OBJ_TYPE_SB] : ((void *)0); } +# 310 "./include/linux/fsnotify_backend.h" +struct fsnotify_mark_connector; +typedef struct fsnotify_mark_connector *fsnotify_connp_t; + + + + + + + +struct fsnotify_mark_connector { + spinlock_t lock; + unsigned short type; + + unsigned short flags; + __kernel_fsid_t fsid; + union { + + fsnotify_connp_t *obj; + + struct fsnotify_mark_connector *destroy_next; + }; + struct hlist_head list; +}; +# 348 "./include/linux/fsnotify_backend.h" +struct fsnotify_mark { + + __u32 mask; + + + refcount_t refcnt; + + + struct fsnotify_group *group; + + + + struct list_head g_list; + + spinlock_t lock; + + struct hlist_node obj_list; + + struct fsnotify_mark_connector *connector; + + __u32 ignored_mask; + + + + unsigned int flags; +}; + + + + + + +extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, + int data_type, const struct qstr *name, u32 cookie); +extern int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, + int data_type); +extern void __fsnotify_inode_delete(struct inode *inode); +extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); +extern void fsnotify_sb_delete(struct super_block *sb); +extern u32 fsnotify_get_cookie(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fsnotify_inode_watches_children(struct inode *inode) +{ + + if (!(inode->i_fsnotify_mask & 0x08000000)) + return 0; + + + return inode->i_fsnotify_mask & ((0x00010000 | 0x00020000 | 0x00040000) | 0x00000001 | 0x00000002 | 0x00000004 | 0x00000008 | 0x00000010 | 0x00000020 | 0x00001000); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_update_flags(struct dentry *dentry) +{ + do { if (__builtin_expect(!!(!queued_spin_is_locked(&(&(&dentry->d_lockref.lock)->rlock)->raw_lock)), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1563)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/fsnotify_backend.h"), "i" (405), "i" (0), "i" (sizeof(struct bug_entry))); } while (0); do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.unreachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1564)); }); asm volatile(""); __builtin_unreachable(); } while (0); } while (0); } while (0); +# 414 "./include/linux/fsnotify_backend.h" + if (fsnotify_inode_watches_children(dentry->d_parent->d_inode)) + dentry->d_flags |= 0x00004000; + else + dentry->d_flags &= ~0x00004000; +} + + + + +extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); + +extern void fsnotify_get_group(struct fsnotify_group *group); + +extern void fsnotify_put_group(struct fsnotify_group *group); + +extern void fsnotify_group_stop_queueing(struct fsnotify_group *group); + +extern void fsnotify_destroy_group(struct fsnotify_group *group); + +extern int fsnotify_fasync(int fd, struct file *file, int on); + +extern void fsnotify_destroy_event(struct fsnotify_group *group, + struct fsnotify_event *event); + +extern int fsnotify_add_event(struct fsnotify_group *group, + struct fsnotify_event *event, + int (*merge)(struct list_head *, 
+ struct fsnotify_event *)); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_queue_overflow(struct fsnotify_group *group) +{ + fsnotify_add_event(group, group->overflow_event, ((void *)0)); +} + + +extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); + +extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); + +extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group); + +extern void fsnotify_remove_queued_event(struct fsnotify_group *group, + struct fsnotify_event *event); + + + + +extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn); + +extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn); +extern void fsnotify_init_mark(struct fsnotify_mark *mark, + struct fsnotify_group *group); + +extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, + struct fsnotify_group *group); + +extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn, + __kernel_fsid_t *fsid); + +extern int fsnotify_add_mark(struct fsnotify_mark *mark, + fsnotify_connp_t *connp, unsigned int type, + int allow_dups, __kernel_fsid_t *fsid); +extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, + fsnotify_connp_t *connp, + unsigned int type, int allow_dups, + __kernel_fsid_t *fsid); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fsnotify_add_inode_mark(struct fsnotify_mark *mark, + struct inode *inode, + int allow_dups) +{ + return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, + FSNOTIFY_OBJ_TYPE_INODE, allow_dups, ((void *)0)); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, + struct inode *inode, + int allow_dups) +{ + return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, + FSNOTIFY_OBJ_TYPE_INODE, allow_dups, + ((void *)0)); +} + + +extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, + struct fsnotify_group *group); + +extern void fsnotify_detach_mark(struct fsnotify_mark *mark); + +extern void fsnotify_free_mark(struct fsnotify_mark *mark); + +extern void fsnotify_wait_marks_destroyed(void); + +extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) +{ + fsnotify_clear_marks_by_group(group, (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) +{ + fsnotify_clear_marks_by_group(group, (1U << FSNOTIFY_OBJ_TYPE_INODE)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group) +{ + fsnotify_clear_marks_by_group(group, (1U << FSNOTIFY_OBJ_TYPE_SB)); +} +extern void fsnotify_get_mark(struct fsnotify_mark *mark); +extern void fsnotify_put_mark(struct fsnotify_mark *mark); +extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); +extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); + +static 
inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_init_event(struct fsnotify_event *event, + unsigned long objectid) +{ + INIT_LIST_HEAD(&event->list); + event->objectid = objectid; +} +# 16 "./include/linux/fsnotify.h" 2 +# 1 "./include/linux/audit.h" 1 +# 14 "./include/linux/audit.h" +# 1 "./include/uapi/linux/audit.h" 1 +# 323 "./include/uapi/linux/audit.h" +enum { + Audit_equal, + Audit_not_equal, + Audit_bitmask, + Audit_bittest, + Audit_lt, + Audit_gt, + Audit_le, + Audit_ge, + Audit_bad +}; +# 449 "./include/uapi/linux/audit.h" +enum audit_nlgrps { + AUDIT_NLGRP_NONE, + AUDIT_NLGRP_READLOG, + __AUDIT_NLGRP_MAX +}; + + +struct audit_status { + __u32 mask; + __u32 enabled; + __u32 failure; + __u32 pid; + __u32 rate_limit; + __u32 backlog_limit; + __u32 lost; + __u32 backlog; + union { + __u32 version; + __u32 feature_bitmap; + }; + __u32 backlog_wait_time; +}; + +struct audit_features { + + __u32 vers; + __u32 mask; + __u32 features; + __u32 lock; +}; +# 487 "./include/uapi/linux/audit.h" +struct audit_tty_status { + __u32 enabled; + __u32 log_passwd; +}; +# 499 "./include/uapi/linux/audit.h" +struct audit_rule_data { + __u32 flags; + __u32 action; + __u32 field_count; + __u32 mask[64]; + __u32 fields[64]; + __u32 values[64]; + __u32 fieldflags[64]; + __u32 buflen; + char buf[0]; +}; +# 15 "./include/linux/audit.h" 2 + + + + +struct audit_sig_info { + uid_t uid; + pid_t pid; + char ctx[]; +}; + +struct audit_buffer; +struct audit_context; +struct inode; +struct netlink_skb_parms; +struct path; +struct linux_binprm; +struct mq_attr; +struct mqstat; +struct audit_watch; +struct audit_tree; +struct sk_buff; + +struct audit_krule { + u32 pflags; + u32 flags; + u32 listnr; + u32 action; + u32 mask[64]; + u32 buflen; + u32 field_count; + char *filterkey; + struct audit_field *fields; + struct audit_field *arch_f; + struct audit_field *inode_f; + struct audit_watch *watch; + struct audit_tree *tree; + struct audit_fsnotify_mark *exe; + struct list_head rlist; + struct list_head list; + u64 prio; +}; + + + + +struct audit_field { + u32 type; + union { + u32 val; + kuid_t uid; + kgid_t gid; + struct { + char *lsm_str; + void *lsm_rule; + }; + }; + u32 op; +}; + +enum audit_ntp_type { + AUDIT_NTP_OFFSET, + AUDIT_NTP_FREQ, + AUDIT_NTP_STATUS, + AUDIT_NTP_TAI, + AUDIT_NTP_TICK, + AUDIT_NTP_ADJUST, + + AUDIT_NTP_NVALS +}; + + +struct audit_ntp_val { + long long oldval, newval; +}; + +struct audit_ntp_data { + struct audit_ntp_val vals[AUDIT_NTP_NVALS]; +}; + + + + +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER, + AUDIT_XT_OP_REPLACE, + AUDIT_XT_OP_UNREGISTER, +}; + +extern int is_audit_feature_set(int which); + +extern int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) audit_register_class(int class, unsigned *list); +extern int audit_classify_syscall(int abi, unsigned syscall); +extern int audit_classify_arch(int arch); + +extern unsigned compat_write_class[]; +extern unsigned compat_read_class[]; +extern unsigned compat_dir_class[]; +extern unsigned compat_chattr_class[]; +extern unsigned compat_signal_class[]; + +extern int audit_classify_compat_syscall(int abi, unsigned syscall); +# 131 "./include/linux/audit.h" +struct filename; + + + + + + + +extern __attribute__((__format__(printf, 4, 5))) +void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, + const char *fmt, ...); + +extern struct audit_buffer *audit_log_start(struct audit_context *ctx, 
gfp_t gfp_mask, int type); +extern __attribute__((__format__(printf, 2, 3))) +void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); +extern void audit_log_end(struct audit_buffer *ab); +extern bool audit_string_contains_control(const char *string, + size_t len); +extern void audit_log_n_hex(struct audit_buffer *ab, + const unsigned char *buf, + size_t len); +extern void audit_log_n_string(struct audit_buffer *ab, + const char *buf, + size_t n); +extern void audit_log_n_untrustedstring(struct audit_buffer *ab, + const char *string, + size_t n); +extern void audit_log_untrustedstring(struct audit_buffer *ab, + const char *string); +extern void audit_log_d_path(struct audit_buffer *ab, + const char *prefix, + const struct path *path); +extern void audit_log_key(struct audit_buffer *ab, + char *key); +extern void audit_log_path_denied(int type, + const char *operation); +extern void audit_log_lost(const char *message); + +extern int audit_log_task_context(struct audit_buffer *ab); +extern void audit_log_task_info(struct audit_buffer *ab); + +extern int audit_update_lsm_rules(void); + + +extern int audit_rule_change(int type, int seq, void *data, size_t datasz); +extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); + +extern int audit_set_loginuid(kuid_t loginuid); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) kuid_t audit_get_loginuid(struct task_struct *tsk) +{ + return tsk->loginuid; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int audit_get_sessionid(struct task_struct *tsk) +{ + return tsk->sessionid; +} + +extern u32 audit_enabled; + +extern int audit_signal_info(int sig, struct task_struct *t); +# 266 "./include/linux/audit.h" +# 1 "./arch/x86/include/asm/syscall.h" 1 +# 19 "./arch/x86/include/asm/syscall.h" +typedef long (*sys_call_ptr_t)(const struct pt_regs *); +extern const sys_call_ptr_t sys_call_table[]; + + + + + + +extern const sys_call_ptr_t ia32_sys_call_table[]; + + + +extern const sys_call_ptr_t x32_sys_call_table[]; + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) +{ + return regs->orig_ax; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->ax = regs->orig_ax; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->ax; + + + + + + if (task->thread_info.status & (0x0002|0x0004)) + + + + + error = (long) (int) error; + + return __builtin_expect(!!((unsigned long)(void *)(error) >= (unsigned long)-4095), 0) ? 
error : 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->ax; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + regs->ax = (long) error ?: val; +} +# 107 "./arch/x86/include/asm/syscall.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + + if (task->thread_info.status & 0x0002) { + *args++ = regs->bx; + *args++ = regs->cx; + *args++ = regs->dx; + *args++ = regs->si; + *args++ = regs->di; + *args = regs->bp; + } else + + { + *args++ = regs->di; + *args++ = regs->si; + *args++ = regs->dx; + *args++ = regs->r10; + *args++ = regs->r8; + *args = regs->r9; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + + if (task->thread_info.status & 0x0002) { + regs->bx = *args++; + regs->cx = *args++; + regs->dx = *args++; + regs->si = *args++; + regs->di = *args++; + regs->bp = *args; + } else + + { + regs->di = *args++; + regs->si = *args++; + regs->dx = *args++; + regs->r10 = *args++; + regs->r8 = *args++; + regs->r9 = *args; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int syscall_get_arch(struct task_struct *task) +{ + + return (1 && + task->thread_info.status & 0x0002) + ? 
(3|0x40000000) : (62|0x80000000|0x40000000); +} + +void do_syscall_64(unsigned long nr, struct pt_regs *regs); +void do_int80_syscall_32(struct pt_regs *regs); +long do_fast_syscall_32(struct pt_regs *regs); +# 267 "./include/linux/audit.h" 2 + + + +extern int audit_alloc(struct task_struct *task); +extern void __audit_free(struct task_struct *task); +extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3); +extern void __audit_syscall_exit(int ret_success, long ret_value); +extern struct filename *__audit_reusename(const char *uptr); +extern void __audit_getname(struct filename *name); + +extern void __audit_inode(struct filename *name, const struct dentry *dentry, + unsigned int flags); +extern void __audit_file(const struct file *); +extern void __audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type); +extern void audit_seccomp(unsigned long syscall, long signr, int code); +extern void audit_seccomp_actions_logged(const char *names, + const char *old_names, int res); +extern void __audit_ptrace(struct task_struct *t); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_set_context(struct task_struct *task, struct audit_context *ctx) +{ + task->audit_context = ctx; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct audit_context *audit_context(void) +{ + return get_current()->audit_context; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool audit_dummy_context(void) +{ + void *p = audit_context(); + return !p || *(int *)p; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_free(struct task_struct *task) +{ + if (__builtin_expect(!!(task->audit_context), 0)) + __audit_free(task); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_syscall_entry(int major, unsigned long a0, + unsigned long a1, unsigned long a2, + unsigned long a3) +{ + if (__builtin_expect(!!(audit_context()), 0)) + __audit_syscall_entry(major, a0, a1, a2, a3); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_syscall_exit(void *pt_regs) +{ + if (__builtin_expect(!!(audit_context()), 0)) { + int success = (!__builtin_expect(!!((unsigned long)(void *)((unsigned long)(regs_return_value(pt_regs))) >= (unsigned long)-4095), 0)); + long return_code = regs_return_value(pt_regs); + + __audit_syscall_exit(success, return_code); + } +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct filename *audit_reusename(const char *name) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + return __audit_reusename(name); + return ((void *)0); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_getname(struct filename *name) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_getname(name); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_inode(struct filename *name, + const struct dentry *dentry, + unsigned int aflags) { + if 
(__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_inode(name, dentry, aflags); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_file(struct file *file) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_file(file); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_inode_parent_hidden(struct filename *name, + const struct dentry *dentry) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_inode(name, dentry, + 1 | 2); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_inode_child(struct inode *parent, + const struct dentry *dentry, + const unsigned char type) { + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_inode_child(parent, dentry, type); +} +void audit_core_dumps(long signr); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_ptrace(struct task_struct *t) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_ptrace(t); +} + + +extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); +extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); +extern void __audit_bprm(struct linux_binprm *bprm); +extern int __audit_socketcall(int nargs, unsigned long *args); +extern int __audit_sockaddr(int len, void *addr); +extern void __audit_fd_pair(int fd1, int fd2); +extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr); +extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout); +extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); +extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); +extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old); +extern void __audit_log_capset(const struct cred *new, const struct cred *old); +extern void __audit_mmap_fd(int fd, int flags); +extern void __audit_log_kern_module(char *name); +extern void __audit_fanotify(unsigned int response); +extern void __audit_tk_injoffset(struct timespec64 offset); +extern void __audit_ntp_log(const struct audit_ntp_data *ad); +extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, + enum audit_nfcfgop op); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_ipc_obj(struct kern_ipc_perm *ipcp) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_ipc_obj(ipcp); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_fd_pair(int fd1, int fd2) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_fd_pair(fd1, fd2); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_ipc_set_perm(qbytes, uid, gid, mode); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_bprm(struct linux_binprm *bprm) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + 
__audit_bprm(bprm); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int audit_socketcall(int nargs, unsigned long *args) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + return __audit_socketcall(nargs, args); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int audit_socketcall_compat(int nargs, u32 *args) +{ + unsigned long a[6]; + int i; + + if (audit_dummy_context()) + return 0; + + for (i = 0; i < nargs; i++) + a[i] = (unsigned long)args[i]; + return __audit_socketcall(nargs, a); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int audit_sockaddr(int len, void *addr) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + return __audit_sockaddr(len, addr); + return 0; +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_mq_open(oflag, mode, attr); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_mq_notify(mqdes, notification); +} +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_mq_getsetattr(mqdes, mqstat); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + return __audit_log_bprm_fcaps(bprm, new, old); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_log_capset(const struct cred *new, + const struct cred *old) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_log_capset(new, old); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_mmap_fd(int fd, int flags) +{ + if (__builtin_expect(!!(!audit_dummy_context()), 0)) + __audit_mmap_fd(fd, flags); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_log_kern_module(char *name) +{ + if (!audit_dummy_context()) + __audit_log_kern_module(name); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_fanotify(unsigned int response) +{ + if (!audit_dummy_context()) + __audit_fanotify(response); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) void audit_tk_injoffset(struct timespec64 offset) +{ + + if (offset.tv_sec == 0 && offset.tv_nsec == 0) + return; + + if (!audit_dummy_context()) + __audit_tk_injoffset(offset); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_ntp_init(struct audit_ntp_data *ad) +{ + memset(ad, 0, sizeof(*ad)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_ntp_set_old(struct audit_ntp_data *ad, + enum audit_ntp_type type, long long val) +{ + ad->vals[type].oldval = val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_ntp_set_new(struct audit_ntp_data *ad, + enum audit_ntp_type type, long long val) +{ + ad->vals[type].newval = val; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_ntp_log(const struct audit_ntp_data *ad) +{ + if (!audit_dummy_context()) + __audit_ntp_log(ad); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_log_nfcfg(const char *name, u8 af, + unsigned int nentries, + enum audit_nfcfgop op) +{ + if (audit_enabled) + __audit_log_nfcfg(name, af, nentries, op); +} + +extern int audit_n_rules; +extern int audit_signals; +# 675 "./include/linux/audit.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool audit_loginuid_set(struct task_struct *tsk) +{ + return uid_valid(audit_get_loginuid(tsk)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void audit_log_string(struct audit_buffer *ab, const char *buf) +{ + audit_log_n_string(ab, buf, strlen(buf)); +} +# 17 "./include/linux/fsnotify.h" 2 +# 28 "./include/linux/fsnotify.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_name(struct inode *dir, __u32 mask, + struct inode *child, + const struct qstr *name, u32 cookie) +{ + fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie); + + + + + + fsnotify(dir, 0x00080000, dir, FSNOTIFY_EVENT_INODE, name, 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_dirent(struct inode *dir, struct dentry *dentry, + __u32 mask) +{ + fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_dentry(struct dentry *dentry, __u32 mask) +{ + struct inode *inode = d_inode(dentry); + + if ((((inode->i_mode) & 00170000) == 0040000)) + mask |= 0x40000000; + + fsnotify_parent(dentry, mask, inode, FSNOTIFY_EVENT_INODE); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, ((void *)0), 0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fsnotify_file(struct file *file, __u32 mask) +{ + const struct path *path = &file->f_path; + struct inode *inode = file_inode(file); + int ret; + + if (file->f_mode & (( fmode_t)0x4000000)) + return 0; + + if ((((inode->i_mode) & 00170000) == 0040000)) + mask |= 0x40000000; + + ret = fsnotify_parent(path->dentry, mask, 
path, FSNOTIFY_EVENT_PATH); + if (ret) + return ret; + + return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, ((void *)0), 0); +} + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int fsnotify_perm(struct file *file, int mask) +{ + int ret; + __u32 fsnotify_mask = 0; + + if (!(mask & (0x00000004 | 0x00000020))) + return 0; + + if (mask & 0x00000020) { + fsnotify_mask = 0x00010000; + + if (file->f_flags & (( int) (( fmode_t)0x20))) { + ret = fsnotify_file(file, 0x00040000); + + if (ret) + return ret; + } + } else if (mask & 0x00000004) { + fsnotify_mask = 0x00020000; + } + + return fsnotify_file(file, fsnotify_mask); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_link_count(struct inode *inode) +{ + __u32 mask = 0x00000004; + + if ((((inode->i_mode) & 00170000) == 0040000)) + mask |= 0x40000000; + + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, ((void *)0), 0); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_move(struct inode *old_dir, struct inode *new_dir, + const struct qstr *old_name, + int isdir, struct inode *target, + struct dentry *moved) +{ + struct inode *source = moved->d_inode; + u32 fs_cookie = fsnotify_get_cookie(); + __u32 old_dir_mask = 0x00000040; + __u32 new_dir_mask = 0x00000080; + __u32 mask = 0x00000800; + const struct qstr *new_name = &moved->d_name; + + if (old_dir == new_dir) + old_dir_mask |= 0x10000000; + + if (isdir) { + old_dir_mask |= 0x40000000; + new_dir_mask |= 0x40000000; + mask |= 0x40000000; + } + + fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie); + fsnotify_name(new_dir, new_dir_mask, source, new_name, fs_cookie); + + if (target) + fsnotify_link_count(target); + + if (source) + fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, ((void *)0), 0); + audit_inode_child(new_dir, moved, 4); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_inode_delete(struct inode *inode) +{ + __fsnotify_inode_delete(inode); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_vfsmount_delete(struct vfsmount *mnt) +{ + __fsnotify_vfsmount_delete(mnt); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_inoderemove(struct inode *inode) +{ + __u32 mask = 0x00000400; + + if ((((inode->i_mode) & 00170000) == 0040000)) + mask |= 0x40000000; + + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, ((void *)0), 0); + __fsnotify_inode_delete(inode); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_create(struct inode *inode, struct dentry *dentry) +{ + audit_inode_child(inode, dentry, 4); + + fsnotify_dirent(inode, dentry, 0x00000100); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_link(struct inode *dir, struct inode *inode, + struct dentry *new_dentry) +{ + fsnotify_link_count(inode); + audit_inode_child(dir, new_dentry, 4); + + fsnotify_name(dir, 0x00000100, inode, &new_dentry->d_name, 0); +} + + + + + + +static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_unlink(struct inode *dir, struct dentry *dentry) +{ + + ({ int __ret_warn_on = !!(d_is_negative(dentry)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1565)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/fsnotify.h"), "i" (216), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1566)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1567)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + + fsnotify_dirent(dir, dentry, 0x00000200); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) +{ + audit_inode_child(inode, dentry, 4); + + fsnotify_dirent(inode, dentry, 0x00000100 | 0x40000000); +} + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) +{ + + ({ int __ret_warn_on = !!(d_is_negative(dentry)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1568)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/linux/fsnotify.h"), "i" (239), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1569)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1570)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); + + fsnotify_dirent(dir, dentry, 0x00000200 | 0x40000000); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_access(struct file *file) +{ + fsnotify_file(file, 0x00000001); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_modify(struct file *file) +{ + fsnotify_file(file, 0x00000002); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_open(struct file *file) +{ + __u32 mask = 0x00000020; + + if (file->f_flags & (( int) (( fmode_t)0x20))) + mask |= 0x00001000; + + fsnotify_file(file, mask); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_close(struct file 
*file) +{ + __u32 mask = (file->f_mode & (( fmode_t)0x2)) ? 0x00000008 : + 0x00000010; + + fsnotify_file(file, mask); +} + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_xattr(struct dentry *dentry) +{ + fsnotify_dentry(dentry, 0x00000004); +} + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) +{ + __u32 mask = 0; + + if (ia_valid & (1 << 1)) + mask |= 0x00000004; + if (ia_valid & (1 << 2)) + mask |= 0x00000004; + if (ia_valid & (1 << 3)) + mask |= 0x00000002; + + + if ((ia_valid & ((1 << 4) | (1 << 5))) == ((1 << 4) | (1 << 5))) + mask |= 0x00000004; + else if (ia_valid & (1 << 4)) + mask |= 0x00000001; + else if (ia_valid & (1 << 5)) + mask |= 0x00000002; + + if (ia_valid & (1 << 0)) + mask |= 0x00000004; + + if (mask) + fsnotify_dentry(dentry, mask); +} +# 76 "fs/io_uring.c" 2 +# 1 "./include/uapi/linux/fadvise.h" 1 +# 77 "fs/io_uring.c" 2 +# 1 "./include/linux/eventpoll.h" 1 +# 12 "./include/linux/eventpoll.h" +# 1 "./include/uapi/linux/kcmp.h" 1 + + + + + + + +enum kcmp_type { + KCMP_FILE, + KCMP_VM, + KCMP_FILES, + KCMP_FS, + KCMP_SIGHAND, + KCMP_IO, + KCMP_SYSVSEM, + KCMP_EPOLL_TFD, + + KCMP_TYPES, +}; + + +struct kcmp_epoll_slot { + __u32 efd; + __u32 tfd; + __u32 toff; +}; +# 13 "./include/linux/eventpoll.h" 2 + + + +struct file; + + + + + +struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff); + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eventpoll_init_file(struct file *file) +{ + INIT_LIST_HEAD(&file->f_ep_links); + INIT_LIST_HEAD(&file->f_tfile_llink); +} + + + +void eventpoll_release_file(struct file *file); + + + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void eventpoll_release(struct file *file) +{ +# 53 "./include/linux/eventpoll.h" + if (__builtin_expect(!!(list_empty(&file->f_ep_links)), 1)) + return; + + + + + + + eventpoll_release_file(file); +} + +int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, + bool nonblock); + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int ep_op_has_event(int op) +{ + return op != 2; +} +# 78 "fs/io_uring.c" 2 +# 1 "./include/linux/fs_struct.h" 1 +# 9 "./include/linux/fs_struct.h" +struct fs_struct { + int users; + spinlock_t lock; + seqcount_t seq; + int umask; + int in_exec; + struct path root, pwd; +} __attribute__((__designated_init__)); + +extern struct kmem_cache *fs_cachep; + +extern void exit_fs(struct task_struct *); +extern void set_fs_root(struct fs_struct *, const struct path *); +extern void set_fs_pwd(struct fs_struct *, const struct path *); +extern struct fs_struct *copy_fs_struct(struct fs_struct *); +extern void free_fs_struct(struct fs_struct *); +extern int unshare_fs_struct(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void get_fs_root(struct fs_struct *fs, struct path *root) +{ + spin_lock(&fs->lock); + *root = fs->root; + path_get(root); + spin_unlock(&fs->lock); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void get_fs_pwd(struct fs_struct *fs, struct path *pwd) +{ + 
spin_lock(&fs->lock); + *pwd = fs->pwd; + path_get(pwd); + spin_unlock(&fs->lock); +} + +extern bool current_chrooted(void); +# 79 "fs/io_uring.c" 2 + +# 1 "./include/linux/task_work.h" 1 + + + + + + + +typedef void (*task_work_func_t)(struct callback_head *); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void +init_task_work(struct callback_head *twork, task_work_func_t func) +{ + twork->func = func; +} + + + +int task_work_add(struct task_struct *task, struct callback_head *twork, int); + +struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); +void task_work_run(void); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void exit_task_work(struct task_struct *task) +{ + task_work_run(); +} +# 81 "fs/io_uring.c" 2 + + + +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_create; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_create(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags) { if (static_key_false(&__tracepoint_io_uring_create.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" 
(cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1571)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (24), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1572)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1573)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_create)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1574(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(long long))) __compiletime_assert_1574(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_create)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_create)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_create)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_create)->funcs))) *)&((&__tracepoint_io_uring_create)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_create)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_create)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, int fd, void 
*ctx, u32 sq_entries, u32 cq_entries, u32 flags))(it_func))(__data, fd, ctx, sq_entries, cq_entries, flags); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_create.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_create.funcs)) *)({ do { extern void __compiletime_assert_1575(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_create.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_create.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_create.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_create.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_create.funcs)) == sizeof(long long))) __compiletime_assert_1575(); } while (0); ({ typeof( 
_Generic(((__tracepoint_io_uring_create.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_create.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_create.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_create.funcs)))) *)&((__tracepoint_io_uring_create.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_create.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 24, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_create.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_create_rcuidle(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags) { if (static_key_false(&__tracepoint_io_uring_create.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile 
("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1576)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (24), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1577)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1578)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_create)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1579(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_create)->funcs) == sizeof(long long))) __compiletime_assert_1579(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_create)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_create)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_create)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_create)->funcs))) 
*)&((&__tracepoint_io_uring_create)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_create)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_create)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags))(it_func))(__data, fd, ctx, sq_entries, cq_entries, flags); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_create(void (*probe)(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_create, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_prio_io_uring_create(void (*probe)(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_create, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_create(void (*probe)(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_create, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_create(void (*cb)(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_create_enabled(void) { return static_key_false(&__tracepoint_io_uring_create.key); } +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_register; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_register(void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret) { if (static_key_false(&__tracepoint_io_uring_register.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) 
{ case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1580)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (67), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1581)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1582)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_register)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1583(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(long long))) __compiletime_assert_1583(); } while (0); ({ typeof( 
_Generic(((&__tracepoint_io_uring_register)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_register)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_register)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_register)->funcs))) *)&((&__tracepoint_io_uring_register)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_register)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_register)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret))(it_func))(__data, ctx, opcode, nr_files, nr_bufs, eventfd, ret); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch 
(sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_register.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_register.funcs)) *)({ do { extern void __compiletime_assert_1584(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_register.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_register.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_register.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_register.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_register.funcs)) == sizeof(long long))) __compiletime_assert_1584(); } while (0); ({ typeof( _Generic(((__tracepoint_io_uring_register.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_register.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_register.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_register.funcs)))) *)&((__tracepoint_io_uring_register.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_register.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 67, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_register.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_register_rcuidle(void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret) { if (static_key_false(&__tracepoint_io_uring_register.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: 
asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1585)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (67), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1586)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1587)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_register)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1588(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if 
(!((sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_register)->funcs) == sizeof(long long))) __compiletime_assert_1588(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_register)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_register)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_register)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_register)->funcs))) *)&((&__tracepoint_io_uring_register)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_register)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_register)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret))(it_func))(__data, ctx, opcode, nr_files, nr_bufs, eventfd, ret); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_register(void (*probe)(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_register, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_prio_io_uring_register(void (*probe)(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_register, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_register(void (*probe)(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_register, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_register(void (*cb)(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) 
__attribute__((no_instrument_function)) bool trace_io_uring_register_enabled(void) { return static_key_false(&__tracepoint_io_uring_register.key); } +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_file_get; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_file_get(void *ctx, int fd) { if (static_key_false(&__tracepoint_io_uring_file_get.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1589)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# 
bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (108), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1590)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1591)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_file_get)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1592(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(long long))) __compiletime_assert_1592(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_file_get)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_file_get)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_file_get)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_file_get)->funcs))) *)&((&__tracepoint_io_uring_file_get)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_file_get)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_file_get)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, int fd))(it_func))(__data, ctx, fd); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q 
""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_file_get.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_file_get.funcs)) *)({ do { extern void __compiletime_assert_1593(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_file_get.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_file_get.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_file_get.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_file_get.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_file_get.funcs)) == sizeof(long long))) __compiletime_assert_1593(); } while (0); ({ typeof( _Generic(((__tracepoint_io_uring_file_get.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_file_get.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_file_get.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_file_get.funcs)))) *)&((__tracepoint_io_uring_file_get.funcs))); do 
{ } while (0); (typeof((__tracepoint_io_uring_file_get.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 108, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_file_get.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_file_get_rcuidle(void *ctx, int fd) { if (static_key_false(&__tracepoint_io_uring_file_get.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection 
.discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1594)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (108), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1595)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1596)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_file_get)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1597(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_file_get)->funcs) == sizeof(long long))) __compiletime_assert_1597(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_file_get)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_file_get)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_file_get)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_file_get)->funcs))) *)&((&__tracepoint_io_uring_file_get)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_file_get)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_file_get)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, int fd))(it_func))(__data, ctx, fd); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_file_get(void (*probe)(void *__data, void *ctx, int fd), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_file_get, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) 
int register_trace_prio_io_uring_file_get(void (*probe)(void *__data, void *ctx, int fd), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_file_get, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_file_get(void (*probe)(void *__data, void *ctx, int fd), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_file_get, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_file_get(void (*cb)(void *__data, void *ctx, int fd)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_file_get_enabled(void) { return static_key_false(&__tracepoint_io_uring_file_get.key); } +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_queue_async_work; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_queue_async_work(void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags) { if (static_key_false(&__tracepoint_io_uring_queue_async_work.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" 
(cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1598)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (137), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1599)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1600)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_queue_async_work)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1601(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(long long))) __compiletime_assert_1601(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_queue_async_work)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_queue_async_work)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_queue_async_work)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_queue_async_work)->funcs))) *)&((&__tracepoint_io_uring_queue_async_work)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_queue_async_work)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_queue_async_work)->funcs) *)(________p1)); 
}); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags))(it_func))(__data, ctx, rw, req, work, flags); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_queue_async_work.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_queue_async_work.funcs)) *)({ do { extern void __compiletime_assert_1602(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_queue_async_work.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_queue_async_work.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_queue_async_work.funcs)) == sizeof(int) || 
sizeof((__tracepoint_io_uring_queue_async_work.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_queue_async_work.funcs)) == sizeof(long long))) __compiletime_assert_1602(); } while (0); ({ typeof( _Generic(((__tracepoint_io_uring_queue_async_work.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_queue_async_work.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_queue_async_work.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_queue_async_work.funcs)))) *)&((__tracepoint_io_uring_queue_async_work.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_queue_async_work.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 137, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_queue_async_work.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_queue_async_work_rcuidle(void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags) { if (static_key_false(&__tracepoint_io_uring_queue_async_work.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile 
("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1603)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (137), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1604)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1605)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_queue_async_work)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1606(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_queue_async_work)->funcs) == sizeof(long long))) __compiletime_assert_1606(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_queue_async_work)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_queue_async_work)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_queue_async_work)->funcs), char: 
(char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_queue_async_work)->funcs))) *)&((&__tracepoint_io_uring_queue_async_work)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_queue_async_work)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_queue_async_work)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags))(it_func))(__data, ctx, rw, req, work, flags); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_queue_async_work(void (*probe)(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_queue_async_work, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_prio_io_uring_queue_async_work(void (*probe)(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_queue_async_work, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_queue_async_work(void (*probe)(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_queue_async_work, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_queue_async_work(void (*cb)(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_queue_async_work_enabled(void) { return static_key_false(&__tracepoint_io_uring_queue_async_work.key); } +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_defer; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_defer(void *ctx, void *req, unsigned long long user_data) { if (static_key_false(&__tracepoint_io_uring_defer.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm 
volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1607)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (175), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1608)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1609)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_defer)->funcs) ________p1 = ({ do { extern void 
__compiletime_assert_1610(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(long long))) __compiletime_assert_1610(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_defer)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_defer)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_defer)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_defer)->funcs))) *)&((&__tracepoint_io_uring_defer)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_defer)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_defer)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, void *req, unsigned long long user_data))(it_func))(__data, ctx, req, user_data); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile 
("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_defer.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_defer.funcs)) *)({ do { extern void __compiletime_assert_1611(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_defer.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_defer.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_defer.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_defer.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_defer.funcs)) == sizeof(long long))) __compiletime_assert_1611(); } while (0); ({ typeof( _Generic(((__tracepoint_io_uring_defer.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_defer.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_defer.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_defer.funcs)))) *)&((__tracepoint_io_uring_defer.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_defer.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 175, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_defer.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_defer_rcuidle(void *ctx, void *req, unsigned long long user_data) { if (static_key_false(&__tracepoint_io_uring_defer.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const 
void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1612)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (175), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1613)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1614)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : 
:"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_defer)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1615(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_defer)->funcs) == sizeof(long long))) __compiletime_assert_1615(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_defer)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_defer)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_defer)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_defer)->funcs))) *)&((&__tracepoint_io_uring_defer)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_defer)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_defer)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, void *req, unsigned long long user_data))(it_func))(__data, ctx, req, user_data); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_defer(void (*probe)(void *__data, void *ctx, void *req, unsigned long long user_data), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_defer, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_prio_io_uring_defer(void (*probe)(void *__data, void *ctx, void *req, unsigned long long user_data), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_defer, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_defer(void (*probe)(void *__data, void *ctx, void *req, unsigned long long user_data), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_defer, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_defer(void (*cb)(void *__data, void *ctx, void *req, unsigned long long user_data)) { } static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_defer_enabled(void) { return static_key_false(&__tracepoint_io_uring_defer.key); } +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_link; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_link(void *ctx, void *req, void *target_req) { if (static_key_false(&__tracepoint_io_uring_link.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1616)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" 
"2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (208), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1617)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1618)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_link)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1619(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(long long))) __compiletime_assert_1619(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_link)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_link)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_link)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_link)->funcs))) *)&((&__tracepoint_io_uring_link)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_link)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_link)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, void *req, void *target_req))(it_func))(__data, ctx, req, target_req); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm 
volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_link.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_link.funcs)) *)({ do { extern void __compiletime_assert_1620(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_link.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_link.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_link.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_link.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_link.funcs)) == sizeof(long long))) __compiletime_assert_1620(); } while (0); ({ typeof( _Generic(((__tracepoint_io_uring_link.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_link.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_link.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_link.funcs)))) *)&((__tracepoint_io_uring_link.funcs))); do { } while (0); 
(typeof((__tracepoint_io_uring_link.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 208, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_link.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_link_rcuidle(void *ctx, void *req, void *target_req) { if (static_key_false(&__tracepoint_io_uring_link.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection 
.discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1621)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (208), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1622)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1623)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_link)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1624(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_link)->funcs) == sizeof(long long))) __compiletime_assert_1624(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_link)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_link)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_link)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_link)->funcs))) *)&((&__tracepoint_io_uring_link)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_link)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_link)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, void *req, void *target_req))(it_func))(__data, ctx, req, target_req); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_link(void (*probe)(void *__data, void *ctx, void *req, void *target_req), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_link, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int 
register_trace_prio_io_uring_link(void (*probe)(void *__data, void *ctx, void *req, void *target_req), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_link, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_link(void (*probe)(void *__data, void *ctx, void *req, void *target_req), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_link, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_link(void (*cb)(void *__data, void *ctx, void *req, void *target_req)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_link_enabled(void) { return static_key_false(&__tracepoint_io_uring_link.key); } +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_cqring_wait; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_cqring_wait(void *ctx, int min_events) { if (static_key_false(&__tracepoint_io_uring_cqring_wait.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile 
("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1625)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (240), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1626)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1627)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_cqring_wait)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1628(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(long long))) __compiletime_assert_1628(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_cqring_wait)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_cqring_wait)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_cqring_wait)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_cqring_wait)->funcs))) *)&((&__tracepoint_io_uring_cqring_wait)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_cqring_wait)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_cqring_wait)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void 
*__data, void *ctx, int min_events))(it_func))(__data, ctx, min_events); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_cqring_wait.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_cqring_wait.funcs)) *)({ do { extern void __compiletime_assert_1629(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_cqring_wait.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_cqring_wait.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_cqring_wait.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_cqring_wait.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_cqring_wait.funcs)) == sizeof(long long))) __compiletime_assert_1629(); } while (0); ({ typeof( 
_Generic(((__tracepoint_io_uring_cqring_wait.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_cqring_wait.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_cqring_wait.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_cqring_wait.funcs)))) *)&((__tracepoint_io_uring_cqring_wait.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_cqring_wait.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 240, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_cqring_wait.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_cqring_wait_rcuidle(void *ctx, int min_events) { if (static_key_false(&__tracepoint_io_uring_cqring_wait.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: 
asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1630)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (240), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1631)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1632)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_cqring_wait)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1633(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_cqring_wait)->funcs) == sizeof(long long))) __compiletime_assert_1633(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_cqring_wait)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_cqring_wait)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_cqring_wait)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: 
((&__tracepoint_io_uring_cqring_wait)->funcs))) *)&((&__tracepoint_io_uring_cqring_wait)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_cqring_wait)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_cqring_wait)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, int min_events))(it_func))(__data, ctx, min_events); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_cqring_wait(void (*probe)(void *__data, void *ctx, int min_events), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_cqring_wait, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_prio_io_uring_cqring_wait(void (*probe)(void *__data, void *ctx, int min_events), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_cqring_wait, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_cqring_wait(void (*probe)(void *__data, void *ctx, int min_events), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_cqring_wait, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_cqring_wait(void (*cb)(void *__data, void *ctx, int min_events)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_cqring_wait_enabled(void) { return static_key_false(&__tracepoint_io_uring_cqring_wait.key); } +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_fail_link; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_fail_link(void *req, void *link) { if (static_key_false(&__tracepoint_io_uring_fail_link.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" 
"%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1634)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (268), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1635)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1636)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_fail_link)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1637(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(long long))) __compiletime_assert_1637(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_fail_link)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed 
short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_fail_link)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_fail_link)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_fail_link)->funcs))) *)&((&__tracepoint_io_uring_fail_link)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_fail_link)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_fail_link)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *req, void *link))(it_func))(__data, req, link); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" 
(pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_fail_link.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_fail_link.funcs)) *)({ do { extern void __compiletime_assert_1638(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_fail_link.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_fail_link.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_fail_link.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_fail_link.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_fail_link.funcs)) == sizeof(long long))) __compiletime_assert_1638(); } while (0); ({ typeof( _Generic(((__tracepoint_io_uring_fail_link.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_fail_link.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_fail_link.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_fail_link.funcs)))) *)&((__tracepoint_io_uring_fail_link.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_fail_link.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 268, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_fail_link.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_fail_link_rcuidle(void *req, void *link) { if (static_key_false(&__tracepoint_io_uring_fail_link.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; 
}); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1639)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (268), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1640)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1641)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_fail_link)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1642(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_fail_link)->funcs) == sizeof(long long))) 
__compiletime_assert_1642(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_fail_link)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_fail_link)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_fail_link)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_fail_link)->funcs))) *)&((&__tracepoint_io_uring_fail_link)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_fail_link)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_fail_link)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *req, void *link))(it_func))(__data, req, link); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_fail_link(void (*probe)(void *__data, void *req, void *link), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_fail_link, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_prio_io_uring_fail_link(void (*probe)(void *__data, void *req, void *link), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_fail_link, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_fail_link(void (*probe)(void *__data, void *req, void *link), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_fail_link, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_fail_link(void (*cb)(void *__data, void *req, void *link)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_fail_link_enabled(void) { return static_key_false(&__tracepoint_io_uring_fail_link.key); } +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +extern struct tracepoint __tracepoint_io_uring_complete; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_complete(void *ctx, u64 user_data, long res) { if (static_key_false(&__tracepoint_io_uring_complete.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void 
*__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1643)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (295), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1644)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1645)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } 
while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_complete)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1646(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_complete)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_complete)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_complete)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_complete)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_complete)->funcs) == sizeof(long long))) __compiletime_assert_1646(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_complete)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_complete)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_complete)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_complete)->funcs))) *)&((&__tracepoint_io_uring_complete)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_complete)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_complete)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, u64 user_data, long res))(it_func))(__data, ctx, user_data, res); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); 
break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_complete.funcs)) *________p1 = (typeof(*(__tracepoint_io_uring_complete.funcs)) *)({ do { extern void __compiletime_assert_1647(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_complete.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_complete.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_complete.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_complete.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_complete.funcs)) == sizeof(long long))) __compiletime_assert_1647(); } while (0); ({ typeof( _Generic(((__tracepoint_io_uring_complete.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_complete.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_complete.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_complete.funcs)))) *)&((__tracepoint_io_uring_complete.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_complete.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 295, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_complete.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_complete_rcuidle(void *ctx, u64 user_data, long 
[... preprocessed kernel boilerplate elided: the remainder of this hunk is the C-preprocessor expansion of the TRACE_EVENT definitions from ./include/trace/events/io_uring.h — trace_io_uring_complete, trace_io_uring_submit_sqe, trace_io_uring_poll_arm, trace_io_uring_poll_wake, and trace_io_uring_task_add — each repeating the same pattern: a static-key guard, a per-CPU online-mask check, an (S)RCU read-side section that iterates the registered probes, and the register_/unregister_/check_trace_callback_type_/..._enabled helper stubs for that tracepoint ...]
_Generic(((__tracepoint_io_uring_task_add.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_task_add.funcs)))) *)&((__tracepoint_io_uring_task_add.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_task_add.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 414, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_task_add.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_task_add_rcuidle(void *ctx, u8 opcode, u64 user_data, int mask) { if (static_key_false(&__tracepoint_io_uring_task_add.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" 
"%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1684)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (414), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1685)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1686)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_task_add)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1687(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_task_add)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_task_add)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_task_add)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_task_add)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_task_add)->funcs) == sizeof(long long))) __compiletime_assert_1687(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_task_add)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_task_add)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_task_add)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_task_add)->funcs))) *)&((&__tracepoint_io_uring_task_add)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_task_add)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_task_add)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, u8 opcode, u64 user_data, int mask))(it_func))(__data, ctx, opcode, user_data, mask); } while ((++it_func_ptr)->func); } if (1) { 
rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_task_add(void (*probe)(void *__data, void *ctx, u8 opcode, u64 user_data, int mask), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_task_add, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_prio_io_uring_task_add(void (*probe)(void *__data, void *ctx, u8 opcode, u64 user_data, int mask), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_task_add, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_task_add(void (*probe)(void *__data, void *ctx, u8 opcode, u64 user_data, int mask), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_task_add, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_task_add(void (*cb)(void *__data, void *ctx, u8 opcode, u64 user_data, int mask)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_task_add_enabled(void) { return static_key_false(&__tracepoint_io_uring_task_add.key); } +# 438 "./include/trace/events/io_uring.h" + ; + +extern struct tracepoint __tracepoint_io_uring_task_run; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_task_run(void *ctx, u8 opcode, u64 user_data) { if (static_key_false(&__tracepoint_io_uring_task_run.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" 
"1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(0 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1688)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (440), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1689)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1690)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (0) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_task_run)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1691(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(long long))) __compiletime_assert_1691(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_task_run)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_task_run)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_task_run)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, 
signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_task_run)->funcs))) *)&((&__tracepoint_io_uring_task_run)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_task_run)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_task_run)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, u8 opcode, u64 user_data))(it_func))(__data, ctx, opcode, user_data); } while ((++it_func_ptr)->func); } if (0) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); if (1 && (cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) { rcu_read_lock_sched_notrace(); ({ typeof(*(__tracepoint_io_uring_task_run.funcs)) *________p1 = 
(typeof(*(__tracepoint_io_uring_task_run.funcs)) *)({ do { extern void __compiletime_assert_1692(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((__tracepoint_io_uring_task_run.funcs)) == sizeof(char) || sizeof((__tracepoint_io_uring_task_run.funcs)) == sizeof(short) || sizeof((__tracepoint_io_uring_task_run.funcs)) == sizeof(int) || sizeof((__tracepoint_io_uring_task_run.funcs)) == sizeof(long)) || sizeof((__tracepoint_io_uring_task_run.funcs)) == sizeof(long long))) __compiletime_assert_1692(); } while (0); ({ typeof( _Generic(((__tracepoint_io_uring_task_run.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_task_run.funcs)))) __x = (*(const volatile typeof( _Generic(((__tracepoint_io_uring_task_run.funcs)), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((__tracepoint_io_uring_task_run.funcs)))) *)&((__tracepoint_io_uring_task_run.funcs))); do { } while (0); (typeof((__tracepoint_io_uring_task_run.funcs)))__x; }); }); do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((0) || rcu_read_lock_sched_held()))) { __warned = true; lockdep_rcu_suspicious("include/trace/events/io_uring.h", 440, "suspicious rcu_dereference_check() usage"); } } while (0); ; ((typeof(*(__tracepoint_io_uring_task_run.funcs)) *)(________p1)); }); rcu_read_unlock_sched_notrace(); } } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void trace_io_uring_task_run_rcuidle(void *ctx, u8 opcode, u64 user_data) { if (static_key_false(&__tracepoint_io_uring_task_run.key)) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; int __attribute__((__unused__)) __idx = 0; if (!(cpumask_test_cpu((({ typeof(cpu_number) pscr_ret__; do { const void *__vpp_verify = (typeof((&(cpu_number)) + 0))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(cpu_number)) { case 1: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 2: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; 
case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 4: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; case 8: pscr_ret__ = ({ typeof(cpu_number) pfo_ret__; switch (sizeof(cpu_number)) { case 1: asm volatile ("mov" "b ""%%""gs"":" "%" "1"",%0" : "=q" (pfo_ret__) : "m" (cpu_number)); break; case 2: asm volatile ("mov" "w ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 4: asm volatile ("mov" "l ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; case 8: asm volatile ("mov" "q ""%%""gs"":" "%" "1"",%0" : "=r" (pfo_ret__) : "m" (cpu_number)); break; default: __bad_percpu_size(); } pfo_ret__; }); break; default: __bad_size_call_parameter(); break; } pscr_ret__; })), ((const struct cpumask *)&__cpu_online_mask)))) return; ({ int __ret_warn_on = !!(1 && (preempt_count() & (((1UL << (4))-1) << (((0 + 8) + 8) + 4)))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1693)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("include/trace/events/io_uring.h"), "i" (440), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1694)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1695)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); do { __preempt_count_add(1); __asm__ __volatile__("": : :"memory"); } while (0); if (1) { __idx = srcu_read_lock_notrace(&tracepoint_srcu); rcu_irq_enter_irqson(); } it_func_ptr = ({ typeof((&__tracepoint_io_uring_task_run)->funcs) ________p1 = ({ do { extern void __compiletime_assert_1696(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(char) || sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(short) || sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(int) || sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(long)) || sizeof((&__tracepoint_io_uring_task_run)->funcs) == sizeof(long long))) __compiletime_assert_1696(); } while (0); ({ typeof( _Generic(((&__tracepoint_io_uring_task_run)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, 
signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_task_run)->funcs))) __x = (*(const volatile typeof( _Generic(((&__tracepoint_io_uring_task_run)->funcs), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: ((&__tracepoint_io_uring_task_run)->funcs))) *)&((&__tracepoint_io_uring_task_run)->funcs)); do { } while (0); (typeof((&__tracepoint_io_uring_task_run)->funcs))__x; }); }); ((typeof(*(&__tracepoint_io_uring_task_run)->funcs) *)(________p1)); }); if (it_func_ptr) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, void *ctx, u8 opcode, u64 user_data))(it_func))(__data, ctx, opcode, user_data); } while ((++it_func_ptr)->func); } if (1) { rcu_irq_exit_irqson(); srcu_read_unlock_notrace(&tracepoint_srcu, __idx); } do { __asm__ __volatile__("": : :"memory"); __preempt_count_sub(1); } while (0); } while (0); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_io_uring_task_run(void (*probe)(void *__data, void *ctx, u8 opcode, u64 user_data), void *data) { return tracepoint_probe_register(&__tracepoint_io_uring_task_run, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int register_trace_prio_io_uring_task_run(void (*probe)(void *__data, void *ctx, u8 opcode, u64 user_data), void *data, int prio) { return tracepoint_probe_register_prio(&__tracepoint_io_uring_task_run, (void *)probe, data, prio); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int unregister_trace_io_uring_task_run(void (*probe)(void *__data, void *ctx, u8 opcode, u64 user_data), void *data) { return tracepoint_probe_unregister(&__tracepoint_io_uring_task_run, (void *)probe, data); } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void check_trace_callback_type_io_uring_task_run(void (*cb)(void *__data, void *ctx, u8 opcode, u64 user_data)) { } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool trace_io_uring_task_run_enabled(void) { return static_key_false(&__tracepoint_io_uring_task_run.key); } +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 95 "./include/trace/define_trace.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_create[] __attribute__((section("__tracepoints_strings"))) = "io_uring_create"; struct tracepoint __tracepoint_io_uring_create __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_create, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_create" " - . 
\n" " .previous \n");; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_register[] __attribute__((section("__tracepoints_strings"))) = "io_uring_register"; struct tracepoint __tracepoint_io_uring_register __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_register, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_register" " - . \n" " .previous \n");; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_file_get[] __attribute__((section("__tracepoints_strings"))) = "io_uring_file_get"; struct tracepoint __tracepoint_io_uring_file_get __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_file_get, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_file_get" " - . \n" " .previous \n");; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_queue_async_work[] __attribute__((section("__tracepoints_strings"))) = "io_uring_queue_async_work"; struct tracepoint __tracepoint_io_uring_queue_async_work __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_queue_async_work, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_queue_async_work" " - . \n" " .previous \n");; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_defer[] __attribute__((section("__tracepoints_strings"))) = "io_uring_defer"; struct tracepoint __tracepoint_io_uring_defer __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_defer, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_defer" " - . \n" " .previous \n");; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_link[] __attribute__((section("__tracepoints_strings"))) = "io_uring_link"; struct tracepoint __tracepoint_io_uring_link __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_link, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_link" " - . \n" " .previous \n");; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_cqring_wait[] __attribute__((section("__tracepoints_strings"))) = "io_uring_cqring_wait"; struct tracepoint __tracepoint_io_uring_cqring_wait __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_cqring_wait, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_cqring_wait" " - . 
\n" " .previous \n");; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_fail_link[] __attribute__((section("__tracepoints_strings"))) = "io_uring_fail_link"; struct tracepoint __tracepoint_io_uring_fail_link __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_fail_link, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_fail_link" " - . \n" " .previous \n");; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_complete[] __attribute__((section("__tracepoints_strings"))) = "io_uring_complete"; struct tracepoint __tracepoint_io_uring_complete __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_complete, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_complete" " - . \n" " .previous \n");; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +static const char __tpstrtab_io_uring_submit_sqe[] __attribute__((section("__tracepoints_strings"))) = "io_uring_submit_sqe"; struct tracepoint __tracepoint_io_uring_submit_sqe __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_submit_sqe, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_submit_sqe" " - . \n" " .previous \n");; +# 358 "./include/trace/events/io_uring.h" + ; + +static const char __tpstrtab_io_uring_poll_arm[] __attribute__((section("__tracepoints_strings"))) = "io_uring_poll_arm"; struct tracepoint __tracepoint_io_uring_poll_arm __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_poll_arm, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_poll_arm" " - . \n" " .previous \n");; +# 386 "./include/trace/events/io_uring.h" + ; + +static const char __tpstrtab_io_uring_poll_wake[] __attribute__((section("__tracepoints_strings"))) = "io_uring_poll_wake"; struct tracepoint __tracepoint_io_uring_poll_wake __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_poll_wake, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_poll_wake" " - . \n" " .previous \n");; +# 412 "./include/trace/events/io_uring.h" + ; + +static const char __tpstrtab_io_uring_task_add[] __attribute__((section("__tracepoints_strings"))) = "io_uring_task_add"; struct tracepoint __tracepoint_io_uring_task_add __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_task_add, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_task_add" " - . 
\n" " .previous \n");; +# 438 "./include/trace/events/io_uring.h" + ; + +static const char __tpstrtab_io_uring_task_run[] __attribute__((section("__tracepoints_strings"))) = "io_uring_task_run"; struct tracepoint __tracepoint_io_uring_task_run __attribute__((section("__tracepoints"), used)) = { __tpstrtab_io_uring_task_run, { .enabled = { 0 }, { .entries = (void *)0UL } }, ((void *)0), ((void *)0), ((void *)0) }; asm(" .section \"__tracepoints_ptrs\", \"a\" \n" " .balign 4 \n" " .long __tracepoint_" "io_uring_task_run" " - . \n" " .previous \n");; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 96 "./include/trace/define_trace.h" 2 + + + + + + +# 1 "./include/trace/trace_events.h" 1 +# 36 "./include/trace/trace_events.h" +static const char str__io_uring__trace_system_name[] = "io_uring"; +# 155 "./include/trace/trace_events.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_create { struct trace_entry ent; int fd; void * ctx; u32 sq_entries; u32 cq_entries; u32 flags; char __data[0]; }; static struct trace_event_class event_class_io_uring_create;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_create; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_register { struct trace_entry ent; void * ctx; unsigned opcode; unsigned nr_files; unsigned nr_bufs; bool eventfd; long ret; char __data[0]; }; static struct trace_event_class event_class_io_uring_register;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_register; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_file_get { struct trace_entry ent; void * ctx; int fd; char __data[0]; }; static struct trace_event_class event_class_io_uring_file_get;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_file_get; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_queue_async_work { struct trace_entry ent; void * ctx; int rw; void * req; struct io_wq_work * work; unsigned int flags; char __data[0]; }; static struct trace_event_class event_class_io_uring_queue_async_work;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_queue_async_work; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_defer { struct trace_entry ent; void * ctx; void * req; unsigned long long data; char __data[0]; }; static struct trace_event_class event_class_io_uring_defer;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_defer; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_link { struct trace_entry ent; void * ctx; void * req; void * target_req; char __data[0]; }; static struct trace_event_class event_class_io_uring_link;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_link; +# 228 
"./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_cqring_wait { struct trace_entry ent; void * ctx; int min_events; char __data[0]; }; static struct trace_event_class event_class_io_uring_cqring_wait;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_cqring_wait; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_fail_link { struct trace_entry ent; void * req; void * link; char __data[0]; }; static struct trace_event_class event_class_io_uring_fail_link;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_fail_link; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_complete { struct trace_entry ent; void * ctx; u64 user_data; long res; char __data[0]; }; static struct trace_event_class event_class_io_uring_complete;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_complete; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +struct trace_event_raw_io_uring_submit_sqe { struct trace_entry ent; void * ctx; u8 opcode; u64 user_data; bool force_nonblock; bool sq_thread; char __data[0]; }; static struct trace_event_class event_class_io_uring_submit_sqe;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_submit_sqe; +# 358 "./include/trace/events/io_uring.h" + ; + +struct trace_event_raw_io_uring_poll_arm { struct trace_entry ent; void * ctx; u8 opcode; u64 user_data; int mask; int events; char __data[0]; }; static struct trace_event_class event_class_io_uring_poll_arm;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_poll_arm; +# 386 "./include/trace/events/io_uring.h" + ; + +struct trace_event_raw_io_uring_poll_wake { struct trace_entry ent; void * ctx; u8 opcode; u64 user_data; int mask; char __data[0]; }; static struct trace_event_class event_class_io_uring_poll_wake;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_poll_wake; +# 412 "./include/trace/events/io_uring.h" + ; + +struct trace_event_raw_io_uring_task_add { struct trace_entry ent; void * ctx; u8 opcode; u64 user_data; int mask; char __data[0]; }; static struct trace_event_class event_class_io_uring_task_add;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_task_add; +# 438 "./include/trace/events/io_uring.h" + ; + +struct trace_event_raw_io_uring_task_run { struct trace_entry ent; void * ctx; u8 opcode; u64 user_data; char __data[0]; }; static struct trace_event_class event_class_io_uring_task_run;; static struct trace_event_call __attribute__((__used__)) __attribute__((__aligned__(4))) event_io_uring_task_run; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 156 "./include/trace/trace_events.h" 2 +# 222 "./include/trace/trace_events.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_create { ; };; ; +# 49 
"./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_register { ; };; ; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_file_get { ; };; ; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_queue_async_work { ; };; ; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_defer { ; };; ; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_link { ; };; ; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_cqring_wait { ; };; ; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_fail_link { ; };; ; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_complete { ; };; ; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +struct trace_event_data_offsets_io_uring_submit_sqe { ; };; ; +# 358 "./include/trace/events/io_uring.h" + ; + +struct trace_event_data_offsets_io_uring_poll_arm { ; };; ; +# 386 "./include/trace/events/io_uring.h" + ; + +struct trace_event_data_offsets_io_uring_poll_wake { ; };; ; +# 412 "./include/trace/events/io_uring.h" + ; + +struct trace_event_data_offsets_io_uring_task_add { ; };; ; +# 438 "./include/trace/events/io_uring.h" + ; + +struct trace_event_data_offsets_io_uring_task_run { ; };; ; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 223 "./include/trace/trace_events.h" 2 +# 402 "./include/trace/trace_events.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_create(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_create *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, fd %d sq size %d, cq size %d, flags %d" "\n", field->ctx, field->fd, field->sq_entries, field->cq_entries, field->flags); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_create = { .trace = trace_raw_output_io_uring_create, };; ; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_register(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_register *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; 
trace_seq_printf(s, "ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, " "eventfd %d, ret %ld" "\n", field->ctx, field->opcode, field->nr_files, field->nr_bufs, field->eventfd, field->ret); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_register = { .trace = trace_raw_output_io_uring_register, };; ; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_file_get(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_file_get *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, fd %d" "\n", field->ctx, field->fd); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_file_get = { .trace = trace_raw_output_io_uring_file_get, };; ; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_queue_async_work(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_queue_async_work *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, request %p, flags %d, %s queue, work %p" "\n", field->ctx, field->req, field->flags, field->rw ? 
"hashed" : "normal", field->work); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_queue_async_work = { .trace = trace_raw_output_io_uring_queue_async_work, };; ; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_defer(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_defer *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, request %p user_data %llu" "\n", field->ctx, field->req, field->data); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_defer = { .trace = trace_raw_output_io_uring_defer, };; ; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_link(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_link *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, request %p linked after %p" "\n", field->ctx, field->req, field->target_req); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_link = { .trace = trace_raw_output_io_uring_link, };; ; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_cqring_wait(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_cqring_wait *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, min_events %d" "\n", field->ctx, field->min_events); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_cqring_wait = { .trace = trace_raw_output_io_uring_cqring_wait, };; ; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_fail_link(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_fail_link *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "request %p, link %p" "\n", field->req, field->link); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_fail_link = { .trace = trace_raw_output_io_uring_fail_link, };; ; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +static 
__attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_complete(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_complete *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, user_data 0x%llx, result %ld" "\n", field->ctx, (unsigned long long)field->user_data, field->res); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_complete = { .trace = trace_raw_output_io_uring_complete, };; ; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_submit_sqe(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_submit_sqe *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, op %d, data 0x%llx, non block %d, sq_thread %d" "\n", field->ctx, field->opcode, (unsigned long long) field->user_data, field->force_nonblock, field->sq_thread); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_submit_sqe = { .trace = trace_raw_output_io_uring_submit_sqe, };; ; +# 358 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_poll_arm(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_poll_arm *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x" "\n", field->ctx, field->opcode, (unsigned long long) field->user_data, field->mask, field->events); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_poll_arm = { .trace = trace_raw_output_io_uring_poll_arm, };; ; +# 386 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_poll_wake(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_poll_wake *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, op %d, data 0x%llx, mask 0x%x" "\n", field->ctx, field->opcode, (unsigned long long) field->user_data, field->mask); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_poll_wake = { .trace = trace_raw_output_io_uring_poll_wake, };; ; +# 412 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_task_add(struct trace_iterator *iter, int flags, struct trace_event *trace_event) 
{ struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_task_add *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, op %d, data 0x%llx, mask %x" "\n", field->ctx, field->opcode, (unsigned long long) field->user_data, field->mask); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_task_add = { .trace = trace_raw_output_io_uring_task_add, };; ; +# 438 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) enum print_line_t trace_raw_output_io_uring_task_run(struct trace_iterator *iter, int flags, struct trace_event *trace_event) { struct trace_seq *s = &iter->seq; struct trace_seq __attribute__((__unused__)) *p = &iter->tmp_seq; struct trace_event_raw_io_uring_task_run *field; int ret; field = (typeof(field))iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != TRACE_TYPE_HANDLED) return ret; trace_seq_printf(s, "ring %p, op %d, data 0x%llx" "\n", field->ctx, field->opcode, (unsigned long long) field->user_data); return trace_handle_return(s); } static struct trace_event_functions trace_event_type_funcs_io_uring_task_run = { .trace = trace_raw_output_io_uring_task_run, };; ; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 403 "./include/trace/trace_events.h" 2 +# 453 "./include/trace/trace_events.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_create[] = { { .type = "int", .name = "fd", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "u32", .name = "sq_entries", .size = sizeof(u32), .align = __alignof__(u32), .is_signed = (((u32)(-1)) < (u32)1), .filter_type = FILTER_OTHER }, { .type = "u32", .name = "cq_entries", .size = sizeof(u32), .align = __alignof__(u32), .is_signed = (((u32)(-1)) < (u32)1), .filter_type = FILTER_OTHER }, { .type = "u32", .name = "flags", .size = sizeof(u32), .align = __alignof__(u32), .is_signed = (((u32)(-1)) < (u32)1), .filter_type = FILTER_OTHER }, {} };; ; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_register[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "unsigned", .name = "opcode", .size = sizeof(unsigned), .align = __alignof__(unsigned), .is_signed = (((unsigned)(-1)) < (unsigned)1), .filter_type = FILTER_OTHER }, { .type = "unsigned", .name = "nr_files", .size = sizeof(unsigned), .align = __alignof__(unsigned), .is_signed = (((unsigned)(-1)) < (unsigned)1), .filter_type = FILTER_OTHER }, { .type = "unsigned", .name = "nr_bufs", .size = sizeof(unsigned), .align = __alignof__(unsigned), .is_signed = (((unsigned)(-1)) < (unsigned)1), .filter_type = FILTER_OTHER }, { .type = "bool", .name = 
"eventfd", .size = sizeof(bool), .align = __alignof__(bool), .is_signed = (((bool)(-1)) < (bool)1), .filter_type = FILTER_OTHER }, { .type = "long", .name = "ret", .size = sizeof(long), .align = __alignof__(long), .is_signed = (((long)(-1)) < (long)1), .filter_type = FILTER_OTHER }, {} };; ; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_file_get[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "fd", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, {} };; ; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_queue_async_work[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "rw", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, { .type = "void *", .name = "req", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "struct io_wq_work *", .name = "work", .size = sizeof(struct io_wq_work *), .align = __alignof__(struct io_wq_work *), .is_signed = (((struct io_wq_work *)(-1)) < (struct io_wq_work *)1), .filter_type = FILTER_OTHER }, { .type = "unsigned int", .name = "flags", .size = sizeof(unsigned int), .align = __alignof__(unsigned int), .is_signed = (((unsigned int)(-1)) < (unsigned int)1), .filter_type = FILTER_OTHER }, {} };; ; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_defer[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "void *", .name = "req", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "unsigned long long", .name = "data", .size = sizeof(unsigned long long), .align = __alignof__(unsigned long long), .is_signed = (((unsigned long long)(-1)) < (unsigned long long)1), .filter_type = FILTER_OTHER }, {} };; ; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_link[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "void *", .name = "req", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "void *", .name = "target_req", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, {} };; ; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_cqring_wait[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), 
.filter_type = FILTER_OTHER }, { .type = "int", .name = "min_events", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, {} };; ; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_fail_link[] = { { .type = "void *", .name = "req", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "void *", .name = "link", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, {} };; ; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_complete[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "u64", .name = "user_data", .size = sizeof(u64), .align = __alignof__(u64), .is_signed = (((u64)(-1)) < (u64)1), .filter_type = FILTER_OTHER }, { .type = "long", .name = "res", .size = sizeof(long), .align = __alignof__(long), .is_signed = (((long)(-1)) < (long)1), .filter_type = FILTER_OTHER }, {} };; ; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +static struct trace_event_fields trace_event_fields_io_uring_submit_sqe[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "u8", .name = "opcode", .size = sizeof(u8), .align = __alignof__(u8), .is_signed = (((u8)(-1)) < (u8)1), .filter_type = FILTER_OTHER }, { .type = "u64", .name = "user_data", .size = sizeof(u64), .align = __alignof__(u64), .is_signed = (((u64)(-1)) < (u64)1), .filter_type = FILTER_OTHER }, { .type = "bool", .name = "force_nonblock", .size = sizeof(bool), .align = __alignof__(bool), .is_signed = (((bool)(-1)) < (bool)1), .filter_type = FILTER_OTHER }, { .type = "bool", .name = "sq_thread", .size = sizeof(bool), .align = __alignof__(bool), .is_signed = (((bool)(-1)) < (bool)1), .filter_type = FILTER_OTHER }, {} };; ; +# 358 "./include/trace/events/io_uring.h" + ; + +static struct trace_event_fields trace_event_fields_io_uring_poll_arm[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "u8", .name = "opcode", .size = sizeof(u8), .align = __alignof__(u8), .is_signed = (((u8)(-1)) < (u8)1), .filter_type = FILTER_OTHER }, { .type = "u64", .name = "user_data", .size = sizeof(u64), .align = __alignof__(u64), .is_signed = (((u64)(-1)) < (u64)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "mask", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "events", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, {} };; ; +# 386 "./include/trace/events/io_uring.h" + ; + +static struct trace_event_fields trace_event_fields_io_uring_poll_wake[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "u8", .name = "opcode", .size = sizeof(u8), 
.align = __alignof__(u8), .is_signed = (((u8)(-1)) < (u8)1), .filter_type = FILTER_OTHER }, { .type = "u64", .name = "user_data", .size = sizeof(u64), .align = __alignof__(u64), .is_signed = (((u64)(-1)) < (u64)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "mask", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, {} };; ; +# 412 "./include/trace/events/io_uring.h" + ; + +static struct trace_event_fields trace_event_fields_io_uring_task_add[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "u8", .name = "opcode", .size = sizeof(u8), .align = __alignof__(u8), .is_signed = (((u8)(-1)) < (u8)1), .filter_type = FILTER_OTHER }, { .type = "u64", .name = "user_data", .size = sizeof(u64), .align = __alignof__(u64), .is_signed = (((u64)(-1)) < (u64)1), .filter_type = FILTER_OTHER }, { .type = "int", .name = "mask", .size = sizeof(int), .align = __alignof__(int), .is_signed = (((int)(-1)) < (int)1), .filter_type = FILTER_OTHER }, {} };; ; +# 438 "./include/trace/events/io_uring.h" + ; + +static struct trace_event_fields trace_event_fields_io_uring_task_run[] = { { .type = "void *", .name = "ctx", .size = sizeof(void *), .align = __alignof__(void *), .is_signed = (((void *)(-1)) < (void *)1), .filter_type = FILTER_OTHER }, { .type = "u8", .name = "opcode", .size = sizeof(u8), .align = __alignof__(u8), .is_signed = (((u8)(-1)) < (u8)1), .filter_type = FILTER_OTHER }, { .type = "u64", .name = "user_data", .size = sizeof(u64), .align = __alignof__(u64), .is_signed = (((u64)(-1)) < (u64)1), .filter_type = FILTER_OTHER }, {} };; ; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 454 "./include/trace/trace_events.h" 2 +# 533 "./include/trace/trace_events.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_create( struct trace_event_data_offsets_io_uring_create *__data_offsets, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_create __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_register( struct trace_event_data_offsets_io_uring_register *__data_offsets, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_register __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int 
trace_event_get_offsets_io_uring_file_get( struct trace_event_data_offsets_io_uring_file_get *__data_offsets, void *ctx, int fd) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_file_get __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_queue_async_work( struct trace_event_data_offsets_io_uring_queue_async_work *__data_offsets, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_queue_async_work __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_defer( struct trace_event_data_offsets_io_uring_defer *__data_offsets, void *ctx, void *req, unsigned long long user_data) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_defer __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_link( struct trace_event_data_offsets_io_uring_link *__data_offsets, void *ctx, void *req, void *target_req) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_link __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_cqring_wait( struct trace_event_data_offsets_io_uring_cqring_wait *__data_offsets, void *ctx, int min_events) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_cqring_wait __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_fail_link( struct trace_event_data_offsets_io_uring_fail_link *__data_offsets, void *req, void *link) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_fail_link __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_complete( struct trace_event_data_offsets_io_uring_complete 
*__data_offsets, void *ctx, u64 user_data, long res) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_complete __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_submit_sqe( struct trace_event_data_offsets_io_uring_submit_sqe *__data_offsets, void *ctx, u8 opcode, u64 user_data, bool force_nonblock, bool sq_thread) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_submit_sqe __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 358 "./include/trace/events/io_uring.h" + ; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_poll_arm( struct trace_event_data_offsets_io_uring_poll_arm *__data_offsets, void *ctx, u8 opcode, u64 user_data, int mask, int events) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_poll_arm __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 386 "./include/trace/events/io_uring.h" + ; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_poll_wake( struct trace_event_data_offsets_io_uring_poll_wake *__data_offsets, void *ctx, u8 opcode, u64 user_data, int mask) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_poll_wake __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 412 "./include/trace/events/io_uring.h" + ; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_task_add( struct trace_event_data_offsets_io_uring_task_add *__data_offsets, void *ctx, u8 opcode, u64 user_data, int mask) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_task_add __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 438 "./include/trace/events/io_uring.h" + ; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) __attribute__((no_instrument_function)) int trace_event_get_offsets_io_uring_task_run( struct trace_event_data_offsets_io_uring_task_run *__data_offsets, void *ctx, u8 opcode, u64 user_data) { int __data_size = 0; int __attribute__((__unused__)) __item_length; struct trace_event_raw_io_uring_task_run __attribute__((__unused__)) *entry; ; return __data_size; }; ; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 534 "./include/trace/trace_events.h" 2 +# 727 "./include/trace/trace_events.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_create(void *__data, int fd, void *ctx, u32 
sq_entries, u32 cq_entries, u32 flags) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_create __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_create *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_create(&__data_offsets, fd, ctx, sq_entries, cq_entries, flags); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->fd = fd; entry->ctx = ctx; entry->sq_entries = sq_entries; entry->cq_entries = cq_entries; entry->flags = flags;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_create(void) { check_trace_callback_type_io_uring_create(trace_event_raw_event_io_uring_create); }; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_register(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_register __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_register *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_register(&__data_offsets, ctx, opcode, nr_files, nr_bufs, eventfd, ret); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->opcode = opcode; entry->nr_files = nr_files; entry->nr_bufs = nr_bufs; entry->eventfd = eventfd; entry->ret = ret;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_register(void) { check_trace_callback_type_io_uring_register(trace_event_raw_event_io_uring_register); }; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_file_get(void *__data, void *ctx, int fd) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_file_get __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_file_get *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_file_get(&__data_offsets, ctx, fd); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->fd = fd;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_file_get(void) { check_trace_callback_type_io_uring_file_get(trace_event_raw_event_io_uring_file_get); }; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_queue_async_work(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags) { struct trace_event_file 
*trace_file = __data; struct trace_event_data_offsets_io_uring_queue_async_work __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_queue_async_work *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_queue_async_work(&__data_offsets, ctx, rw, req, work, flags); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->rw = rw; entry->req = req; entry->work = work; entry->flags = flags;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_queue_async_work(void) { check_trace_callback_type_io_uring_queue_async_work(trace_event_raw_event_io_uring_queue_async_work); }; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_defer(void *__data, void *ctx, void *req, unsigned long long user_data) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_defer __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_defer *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_defer(&__data_offsets, ctx, req, user_data); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->req = req; entry->data = user_data;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_defer(void) { check_trace_callback_type_io_uring_defer(trace_event_raw_event_io_uring_defer); }; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_link(void *__data, void *ctx, void *req, void *target_req) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_link __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_link *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_link(&__data_offsets, ctx, req, target_req); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->req = req; entry->target_req = target_req;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_link(void) { check_trace_callback_type_io_uring_link(trace_event_raw_event_io_uring_link); }; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_cqring_wait(void *__data, void *ctx, int min_events) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_cqring_wait __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_cqring_wait *entry; int __data_size; 
if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_cqring_wait(&__data_offsets, ctx, min_events); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->min_events = min_events;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_cqring_wait(void) { check_trace_callback_type_io_uring_cqring_wait(trace_event_raw_event_io_uring_cqring_wait); }; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_fail_link(void *__data, void *req, void *link) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_fail_link __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_fail_link *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_fail_link(&__data_offsets, req, link); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->req = req; entry->link = link;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_fail_link(void) { check_trace_callback_type_io_uring_fail_link(trace_event_raw_event_io_uring_fail_link); }; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_complete(void *__data, void *ctx, u64 user_data, long res) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_complete __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_complete *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_complete(&__data_offsets, ctx, user_data, res); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->user_data = user_data; entry->res = res;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_complete(void) { check_trace_callback_type_io_uring_complete(trace_event_raw_event_io_uring_complete); }; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_submit_sqe(void *__data, void *ctx, u8 opcode, u64 user_data, bool force_nonblock, bool sq_thread) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_submit_sqe __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_submit_sqe *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_submit_sqe(&__data_offsets, ctx, opcode, user_data, force_nonblock, sq_thread); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if 
(!entry) return; { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data; entry->force_nonblock = force_nonblock; entry->sq_thread = sq_thread;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_submit_sqe(void) { check_trace_callback_type_io_uring_submit_sqe(trace_event_raw_event_io_uring_submit_sqe); }; +# 358 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_poll_arm(void *__data, void *ctx, u8 opcode, u64 user_data, int mask, int events) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_poll_arm __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_poll_arm *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_poll_arm(&__data_offsets, ctx, opcode, user_data, mask, events); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data; entry->mask = mask; entry->events = events;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_poll_arm(void) { check_trace_callback_type_io_uring_poll_arm(trace_event_raw_event_io_uring_poll_arm); }; +# 386 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_poll_wake(void *__data, void *ctx, u8 opcode, u64 user_data, int mask) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_poll_wake __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_poll_wake *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_poll_wake(&__data_offsets, ctx, opcode, user_data, mask); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data; entry->mask = mask;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_poll_wake(void) { check_trace_callback_type_io_uring_poll_wake(trace_event_raw_event_io_uring_poll_wake); }; +# 412 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_task_add(void *__data, void *ctx, u8 opcode, u64 user_data, int mask) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_task_add __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_task_add *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_task_add(&__data_offsets, ctx, opcode, user_data, mask); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data; entry->mask = mask;; } 
trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_task_add(void) { check_trace_callback_type_io_uring_task_add(trace_event_raw_event_io_uring_task_add); }; +# 438 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void trace_event_raw_event_io_uring_task_run(void *__data, void *ctx, u8 opcode, u64 user_data) { struct trace_event_file *trace_file = __data; struct trace_event_data_offsets_io_uring_task_run __attribute__((__unused__)) __data_offsets; struct trace_event_buffer fbuffer; struct trace_event_raw_io_uring_task_run *entry; int __data_size; if (trace_trigger_soft_disabled(trace_file)) return; __data_size = trace_event_get_offsets_io_uring_task_run(&__data_offsets, ctx, opcode, user_data); entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry) + __data_size); if (!entry) return; { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data;; } trace_event_buffer_commit(&fbuffer); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void ftrace_test_probe_io_uring_task_run(void) { check_trace_callback_type_io_uring_task_run(trace_event_raw_event_io_uring_task_run); }; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 728 "./include/trace/trace_events.h" 2 +# 792 "./include/trace/trace_events.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_create(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags);; static char print_fmt_io_uring_create[] = "\"" "ring %p, fd %d sq size %d, cq size %d, flags %d" "\", " "REC->ctx, REC->fd, REC->sq_entries, REC->cq_entries, REC->flags"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_create = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_create, .fields = { &(event_class_io_uring_create.fields), &(event_class_io_uring_create.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_create, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_create, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_create = { .class = &event_class_io_uring_create, { .tp = &__tracepoint_io_uring_create, }, .event.funcs = &trace_event_type_funcs_io_uring_create, .print_fmt = print_fmt_io_uring_create, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_create = &event_io_uring_create; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_register(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret);; static char print_fmt_io_uring_register[] = "\"" "ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, " "eventfd %d, ret %ld" "\", " "REC->ctx, REC->opcode, REC->nr_files, REC->nr_bufs, REC->eventfd, REC->ret"; static struct trace_event_class 
__attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_register = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_register, .fields = { &(event_class_io_uring_register.fields), &(event_class_io_uring_register.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_register, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_register, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_register = { .class = &event_class_io_uring_register, { .tp = &__tracepoint_io_uring_register, }, .event.funcs = &trace_event_type_funcs_io_uring_register, .print_fmt = print_fmt_io_uring_register, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_register = &event_io_uring_register; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_file_get(void *__data, void *ctx, int fd);; static char print_fmt_io_uring_file_get[] = "\"" "ring %p, fd %d" "\", " "REC->ctx, REC->fd"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_file_get = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_file_get, .fields = { &(event_class_io_uring_file_get.fields), &(event_class_io_uring_file_get.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_file_get, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_file_get, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_file_get = { .class = &event_class_io_uring_file_get, { .tp = &__tracepoint_io_uring_file_get, }, .event.funcs = &trace_event_type_funcs_io_uring_file_get, .print_fmt = print_fmt_io_uring_file_get, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_file_get = &event_io_uring_file_get; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_queue_async_work(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags);; static char print_fmt_io_uring_queue_async_work[] = "\"" "ring %p, request %p, flags %d, %s queue, work %p" "\", " "REC->ctx, REC->req, REC->flags, REC->rw ? 
\"hashed\" : \"normal\", REC->work"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_queue_async_work = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_queue_async_work, .fields = { &(event_class_io_uring_queue_async_work.fields), &(event_class_io_uring_queue_async_work.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_queue_async_work, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_queue_async_work, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_queue_async_work = { .class = &event_class_io_uring_queue_async_work, { .tp = &__tracepoint_io_uring_queue_async_work, }, .event.funcs = &trace_event_type_funcs_io_uring_queue_async_work, .print_fmt = print_fmt_io_uring_queue_async_work, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_queue_async_work = &event_io_uring_queue_async_work; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_defer(void *__data, void *ctx, void *req, unsigned long long user_data);; static char print_fmt_io_uring_defer[] = "\"" "ring %p, request %p user_data %llu" "\", " "REC->ctx, REC->req, REC->data"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_defer = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_defer, .fields = { &(event_class_io_uring_defer.fields), &(event_class_io_uring_defer.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_defer, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_defer, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_defer = { .class = &event_class_io_uring_defer, { .tp = &__tracepoint_io_uring_defer, }, .event.funcs = &trace_event_type_funcs_io_uring_defer, .print_fmt = print_fmt_io_uring_defer, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_defer = &event_io_uring_defer; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_link(void *__data, void *ctx, void *req, void *target_req);; static char print_fmt_io_uring_link[] = "\"" "ring %p, request %p linked after %p" "\", " "REC->ctx, REC->req, REC->target_req"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_link = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_link, .fields = { &(event_class_io_uring_link.fields), &(event_class_io_uring_link.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_link, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_link, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_link = { .class = &event_class_io_uring_link, { .tp = &__tracepoint_io_uring_link, }, .event.funcs = &trace_event_type_funcs_io_uring_link, .print_fmt = print_fmt_io_uring_link, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) 
__attribute__((section("_ftrace_events"))) *__event_io_uring_link = &event_io_uring_link; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_cqring_wait(void *__data, void *ctx, int min_events);; static char print_fmt_io_uring_cqring_wait[] = "\"" "ring %p, min_events %d" "\", " "REC->ctx, REC->min_events"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_cqring_wait = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_cqring_wait, .fields = { &(event_class_io_uring_cqring_wait.fields), &(event_class_io_uring_cqring_wait.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_cqring_wait, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_cqring_wait, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_cqring_wait = { .class = &event_class_io_uring_cqring_wait, { .tp = &__tracepoint_io_uring_cqring_wait, }, .event.funcs = &trace_event_type_funcs_io_uring_cqring_wait, .print_fmt = print_fmt_io_uring_cqring_wait, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_cqring_wait = &event_io_uring_cqring_wait; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_fail_link(void *__data, void *req, void *link);; static char print_fmt_io_uring_fail_link[] = "\"" "request %p, link %p" "\", " "REC->req, REC->link"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_fail_link = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_fail_link, .fields = { &(event_class_io_uring_fail_link.fields), &(event_class_io_uring_fail_link.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_fail_link, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_fail_link, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_fail_link = { .class = &event_class_io_uring_fail_link, { .tp = &__tracepoint_io_uring_fail_link, }, .event.funcs = &trace_event_type_funcs_io_uring_fail_link, .print_fmt = print_fmt_io_uring_fail_link, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_fail_link = &event_io_uring_fail_link; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_complete(void *__data, void *ctx, u64 user_data, long res);; static char print_fmt_io_uring_complete[] = "\"" "ring %p, user_data 0x%llx, result %ld" "\", " "REC->ctx, (unsigned long long)REC->user_data, REC->res"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_complete = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_complete, .fields = { &(event_class_io_uring_complete.fields), &(event_class_io_uring_complete.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_complete, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_complete, };; static 
struct trace_event_call __attribute__((__used__)) event_io_uring_complete = { .class = &event_class_io_uring_complete, { .tp = &__tracepoint_io_uring_complete, }, .event.funcs = &trace_event_type_funcs_io_uring_complete, .print_fmt = print_fmt_io_uring_complete, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_complete = &event_io_uring_complete; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_submit_sqe(void *__data, void *ctx, u8 opcode, u64 user_data, bool force_nonblock, bool sq_thread);; static char print_fmt_io_uring_submit_sqe[] = "\"" "ring %p, op %d, data 0x%llx, non block %d, sq_thread %d" "\", " "REC->ctx, REC->opcode, (unsigned long long) REC->user_data, REC->force_nonblock, REC->sq_thread"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_submit_sqe = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_submit_sqe, .fields = { &(event_class_io_uring_submit_sqe.fields), &(event_class_io_uring_submit_sqe.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_submit_sqe, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_submit_sqe, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_submit_sqe = { .class = &event_class_io_uring_submit_sqe, { .tp = &__tracepoint_io_uring_submit_sqe, }, .event.funcs = &trace_event_type_funcs_io_uring_submit_sqe, .print_fmt = print_fmt_io_uring_submit_sqe, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_submit_sqe = &event_io_uring_submit_sqe; +# 358 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void perf_trace_io_uring_poll_arm(void *__data, void *ctx, u8 opcode, u64 user_data, int mask, int events);; static char print_fmt_io_uring_poll_arm[] = "\"" "ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x" "\", " "REC->ctx, REC->opcode, (unsigned long long) REC->user_data, REC->mask, REC->events"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_poll_arm = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_poll_arm, .fields = { &(event_class_io_uring_poll_arm.fields), &(event_class_io_uring_poll_arm.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_poll_arm, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_poll_arm, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_poll_arm = { .class = &event_class_io_uring_poll_arm, { .tp = &__tracepoint_io_uring_poll_arm, }, .event.funcs = &trace_event_type_funcs_io_uring_poll_arm, .print_fmt = print_fmt_io_uring_poll_arm, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_poll_arm = &event_io_uring_poll_arm; +# 386 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void perf_trace_io_uring_poll_wake(void *__data, void *ctx, u8 opcode, u64 user_data, int mask);; static char print_fmt_io_uring_poll_wake[] = "\"" "ring %p, op %d, data 0x%llx, mask 0x%x" "\", " 
"REC->ctx, REC->opcode, (unsigned long long) REC->user_data, REC->mask"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_poll_wake = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_poll_wake, .fields = { &(event_class_io_uring_poll_wake.fields), &(event_class_io_uring_poll_wake.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_poll_wake, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_poll_wake, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_poll_wake = { .class = &event_class_io_uring_poll_wake, { .tp = &__tracepoint_io_uring_poll_wake, }, .event.funcs = &trace_event_type_funcs_io_uring_poll_wake, .print_fmt = print_fmt_io_uring_poll_wake, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_poll_wake = &event_io_uring_poll_wake; +# 412 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void perf_trace_io_uring_task_add(void *__data, void *ctx, u8 opcode, u64 user_data, int mask);; static char print_fmt_io_uring_task_add[] = "\"" "ring %p, op %d, data 0x%llx, mask %x" "\", " "REC->ctx, REC->opcode, (unsigned long long) REC->user_data, REC->mask"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_task_add = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_task_add, .fields = { &(event_class_io_uring_task_add.fields), &(event_class_io_uring_task_add.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_task_add, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_task_add, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_task_add = { .class = &event_class_io_uring_task_add, { .tp = &__tracepoint_io_uring_task_add, }, .event.funcs = &trace_event_type_funcs_io_uring_task_add, .print_fmt = print_fmt_io_uring_task_add, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_io_uring_task_add = &event_io_uring_task_add; +# 438 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void perf_trace_io_uring_task_run(void *__data, void *ctx, u8 opcode, u64 user_data);; static char print_fmt_io_uring_task_run[] = "\"" "ring %p, op %d, data 0x%llx" "\", " "REC->ctx, REC->opcode, (unsigned long long) REC->user_data"; static struct trace_event_class __attribute__((__used__)) __attribute__((__section__(".ref.data"))) event_class_io_uring_task_run = { .system = str__io_uring__trace_system_name, .fields_array = trace_event_fields_io_uring_task_run, .fields = { &(event_class_io_uring_task_run.fields), &(event_class_io_uring_task_run.fields) }, .raw_init = trace_event_raw_init, .probe = trace_event_raw_event_io_uring_task_run, .reg = trace_event_reg, .perf_probe = perf_trace_io_uring_task_run, };; static struct trace_event_call __attribute__((__used__)) event_io_uring_task_run = { .class = &event_class_io_uring_task_run, { .tp = &__tracepoint_io_uring_task_run, }, .event.funcs = &trace_event_type_funcs_io_uring_task_run, .print_fmt = print_fmt_io_uring_task_run, .flags = TRACE_EVENT_FL_TRACEPOINT, }; static struct trace_event_call __attribute__((__used__)) 
__attribute__((section("_ftrace_events"))) *__event_io_uring_task_run = &event_io_uring_task_run; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 792 "./include/trace/trace_events.h" 2 +# 103 "./include/trace/define_trace.h" 2 +# 1 "./include/trace/perf.h" 1 +# 90 "./include/trace/perf.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_create(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_create __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_create *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_create(&__data_offsets, fd, ctx, sq_entries, cq_entries, flags); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->fd = fd; entry->ctx = ctx; entry->sq_entries = sq_entries; entry->cq_entries = cq_entries; entry->flags = flags;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_create(void) { check_trace_callback_type_io_uring_create(perf_trace_io_uring_create); }; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_register(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_register __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_register *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_register(&__data_offsets, ctx, opcode, nr_files, nr_bufs, eventfd, ret); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if 
(!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->opcode = opcode; entry->nr_files = nr_files; entry->nr_bufs = nr_bufs; entry->eventfd = eventfd; entry->ret = ret;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_register(void) { check_trace_callback_type_io_uring_register(perf_trace_io_uring_register); }; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_file_get(void *__data, void *ctx, int fd) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_file_get __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_file_get *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_file_get(&__data_offsets, ctx, fd); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->fd = fd;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_file_get(void) { check_trace_callback_type_io_uring_file_get(perf_trace_io_uring_file_get); }; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_queue_async_work(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_queue_async_work __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_queue_async_work *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_queue_async_work(&__data_offsets, ctx, rw, req, work, flags); head = ({ do { const void *__vpp_verify 
= (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->rw = rw; entry->req = req; entry->work = work; entry->flags = flags;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_queue_async_work(void) { check_trace_callback_type_io_uring_queue_async_work(perf_trace_io_uring_queue_async_work); }; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_defer(void *__data, void *ctx, void *req, unsigned long long user_data) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_defer __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_defer *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_defer(&__data_offsets, ctx, req, user_data); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->req = req; entry->data = user_data;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_defer(void) { check_trace_callback_type_io_uring_defer(perf_trace_io_uring_defer); }; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_link(void *__data, void *ctx, void *req, void *target_req) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_link __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_link *entry; struct pt_regs *__regs; u64 __count = 1; struct 
task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_link(&__data_offsets, ctx, req, target_req); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->req = req; entry->target_req = target_req;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_link(void) { check_trace_callback_type_io_uring_link(perf_trace_io_uring_link); }; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_cqring_wait(void *__data, void *ctx, int min_events) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_cqring_wait __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_cqring_wait *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_cqring_wait(&__data_offsets, ctx, min_events); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->min_events = min_events;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_cqring_wait(void) { check_trace_callback_type_io_uring_cqring_wait(perf_trace_io_uring_cqring_wait); }; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_fail_link(void *__data, void *req, void *link) { struct trace_event_call *event_call = __data; struct 
trace_event_data_offsets_io_uring_fail_link __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_fail_link *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_fail_link(&__data_offsets, req, link); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->req = req; entry->link = link;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_fail_link(void) { check_trace_callback_type_io_uring_fail_link(perf_trace_io_uring_fail_link); }; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void perf_trace_io_uring_complete(void *__data, void *ctx, u64 user_data, long res) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_complete __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_complete *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_complete(&__data_offsets, ctx, user_data, res); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->user_data = user_data; entry->res = res;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_complete(void) { check_trace_callback_type_io_uring_complete(perf_trace_io_uring_complete); }; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +static 
__attribute__((no_instrument_function)) void perf_trace_io_uring_submit_sqe(void *__data, void *ctx, u8 opcode, u64 user_data, bool force_nonblock, bool sq_thread) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_submit_sqe __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_submit_sqe *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_submit_sqe(&__data_offsets, ctx, opcode, user_data, force_nonblock, sq_thread); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data; entry->force_nonblock = force_nonblock; entry->sq_thread = sq_thread;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_submit_sqe(void) { check_trace_callback_type_io_uring_submit_sqe(perf_trace_io_uring_submit_sqe); }; +# 358 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void perf_trace_io_uring_poll_arm(void *__data, void *ctx, u8 opcode, u64 user_data, int mask, int events) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_poll_arm __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_poll_arm *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_poll_arm(&__data_offsets, ctx, opcode, user_data, mask, events); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data; entry->mask = mask; entry->events = events;; } 
perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_poll_arm(void) { check_trace_callback_type_io_uring_poll_arm(perf_trace_io_uring_poll_arm); }; +# 386 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void perf_trace_io_uring_poll_wake(void *__data, void *ctx, u8 opcode, u64 user_data, int mask) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_poll_wake __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_poll_wake *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_poll_wake(&__data_offsets, ctx, opcode, user_data, mask); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data; entry->mask = mask;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_poll_wake(void) { check_trace_callback_type_io_uring_poll_wake(perf_trace_io_uring_poll_wake); }; +# 412 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void perf_trace_io_uring_task_add(void *__data, void *ctx, u8 opcode, u64 user_data, int mask) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_task_add __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_task_add *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_task_add(&__data_offsets, ctx, opcode, user_data, mask); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= 
sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data; entry->mask = mask;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_task_add(void) { check_trace_callback_type_io_uring_task_add(perf_trace_io_uring_task_add); }; +# 438 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void perf_trace_io_uring_task_run(void *__data, void *ctx, u8 opcode, u64 user_data) { struct trace_event_call *event_call = __data; struct trace_event_data_offsets_io_uring_task_run __attribute__((__unused__)) __data_offsets; struct trace_event_raw_io_uring_task_run *entry; struct pt_regs *__regs; u64 __count = 1; struct task_struct *__task = ((void *)0); struct hlist_head *head; int __entry_size; int __data_size; int rctx; __data_size = trace_event_get_offsets_io_uring_task_run(&__data_offsets, ctx, opcode, user_data); head = ({ do { const void *__vpp_verify = (typeof((event_call->perf_events) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long tcp_ptr__; asm volatile("add " "%%""gs"":" "%" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (event_call->perf_events)); (typeof(*(event_call->perf_events)) *)tcp_ptr__; }); }); if (!bpf_prog_array_valid(event_call) && __builtin_constant_p(!__task) && !__task && hlist_empty(head)) return; __entry_size = ((((__data_size + sizeof(*entry) + sizeof(u32))) + ((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)) & ~((typeof((__data_size + sizeof(*entry) + sizeof(u32))))((sizeof(u64))) - 1)); __entry_size -= sizeof(u32); entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); if (!entry) return; perf_fetch_caller_regs(__regs); { entry->ctx = ctx; entry->opcode = opcode; entry->user_data = user_data;; } perf_trace_run_bpf_submit(entry, __entry_size, rctx, event_call, __count, __regs, head, __task); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void perf_test_probe_io_uring_task_run(void) { check_trace_callback_type_io_uring_task_run(perf_trace_io_uring_task_run); }; +# 461 "./include/trace/events/io_uring.h" + ; + + + + +# 1 "./include/trace/define_trace.h" 1 +# 466 "./include/trace/events/io_uring.h" 2 +# 91 "./include/trace/perf.h" 2 +# 104 "./include/trace/define_trace.h" 2 +# 1 "./include/trace/bpf_probe.h" 1 +# 114 "./include/trace/bpf_probe.h" +# 1 "./include/trace/events/io_uring.h" 1 + + + + + + + +# 1 "./include/linux/tracepoint.h" 1 +# 9 "./include/trace/events/io_uring.h" 2 + +struct io_wq_work; +# 24 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_create(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags) { struct bpf_prog *prog = __data; bpf_trace_run5(prog, ({ typeof(fd) __src = (fd); __typeof__(__builtin_choose_expr(sizeof(fd) == 1, (u8)1, __builtin_choose_expr(sizeof(fd) == 2, (u16)2, __builtin_choose_expr(sizeof(fd) == 4, (u32)3, __builtin_choose_expr(sizeof(fd) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, 
__builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(sq_entries) __src = (sq_entries); __typeof__(__builtin_choose_expr(sizeof(sq_entries) == 1, (u8)1, __builtin_choose_expr(sizeof(sq_entries) == 2, (u16)2, __builtin_choose_expr(sizeof(sq_entries) == 4, (u32)3, __builtin_choose_expr(sizeof(sq_entries) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(cq_entries) __src = (cq_entries); __typeof__(__builtin_choose_expr(sizeof(cq_entries) == 1, (u8)1, __builtin_choose_expr(sizeof(cq_entries) == 2, (u16)2, __builtin_choose_expr(sizeof(cq_entries) == 4, (u32)3, __builtin_choose_expr(sizeof(cq_entries) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(flags) __src = (flags); __typeof__(__builtin_choose_expr(sizeof(flags) == 1, (u8)1, __builtin_choose_expr(sizeof(flags) == 2, (u16)2, __builtin_choose_expr(sizeof(flags) == 4, (u32)3, __builtin_choose_expr(sizeof(flags) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_create(void) { check_trace_callback_type_io_uring_create(__bpf_trace_io_uring_create); } typedef void (*btf_trace_io_uring_create)(void *__data, int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags); static union { struct bpf_raw_event_map event; btf_trace_io_uring_create handler; } __bpf_trace_tp_map_io_uring_create __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_create, .bpf_func = __bpf_trace_io_uring_create, .num_args = 5, .writable_size = 0, }, };; +# 49 "./include/trace/events/io_uring.h" + ; +# 67 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_register(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret) { struct bpf_prog *prog = __data; bpf_trace_run6(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(opcode) __src = (opcode); __typeof__(__builtin_choose_expr(sizeof(opcode) == 1, (u8)1, __builtin_choose_expr(sizeof(opcode) == 2, (u16)2, __builtin_choose_expr(sizeof(opcode) == 4, (u32)3, __builtin_choose_expr(sizeof(opcode) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(nr_files) __src = (nr_files); __typeof__(__builtin_choose_expr(sizeof(nr_files) == 1, (u8)1, __builtin_choose_expr(sizeof(nr_files) == 2, (u16)2, __builtin_choose_expr(sizeof(nr_files) == 4, (u32)3, __builtin_choose_expr(sizeof(nr_files) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(nr_bufs) __src = (nr_bufs); __typeof__(__builtin_choose_expr(sizeof(nr_bufs) == 1, (u8)1, __builtin_choose_expr(sizeof(nr_bufs) == 2, (u16)2, __builtin_choose_expr(sizeof(nr_bufs) == 4, (u32)3, __builtin_choose_expr(sizeof(nr_bufs) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(eventfd) __src = (eventfd); 
__typeof__(__builtin_choose_expr(sizeof(eventfd) == 1, (u8)1, __builtin_choose_expr(sizeof(eventfd) == 2, (u16)2, __builtin_choose_expr(sizeof(eventfd) == 4, (u32)3, __builtin_choose_expr(sizeof(eventfd) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(ret) __src = (ret); __typeof__(__builtin_choose_expr(sizeof(ret) == 1, (u8)1, __builtin_choose_expr(sizeof(ret) == 2, (u16)2, __builtin_choose_expr(sizeof(ret) == 4, (u32)3, __builtin_choose_expr(sizeof(ret) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_register(void) { check_trace_callback_type_io_uring_register(__bpf_trace_io_uring_register); } typedef void (*btf_trace_io_uring_register)(void *__data, void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, bool eventfd, long ret); static union { struct bpf_raw_event_map event; btf_trace_io_uring_register handler; } __bpf_trace_tp_map_io_uring_register __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_register, .bpf_func = __bpf_trace_io_uring_register, .num_args = 6, .writable_size = 0, }, };; +# 96 "./include/trace/events/io_uring.h" + ; +# 108 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_file_get(void *__data, void *ctx, int fd) { struct bpf_prog *prog = __data; bpf_trace_run2(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(fd) __src = (fd); __typeof__(__builtin_choose_expr(sizeof(fd) == 1, (u8)1, __builtin_choose_expr(sizeof(fd) == 2, (u16)2, __builtin_choose_expr(sizeof(fd) == 4, (u32)3, __builtin_choose_expr(sizeof(fd) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_file_get(void) { check_trace_callback_type_io_uring_file_get(__bpf_trace_io_uring_file_get); } typedef void (*btf_trace_io_uring_file_get)(void *__data, void *ctx, int fd); static union { struct bpf_raw_event_map event; btf_trace_io_uring_file_get handler; } __bpf_trace_tp_map_io_uring_file_get __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_file_get, .bpf_func = __bpf_trace_io_uring_file_get, .num_args = 2, .writable_size = 0, }, };; +# 125 "./include/trace/events/io_uring.h" + ; +# 137 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_queue_async_work(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags) { struct bpf_prog *prog = __data; bpf_trace_run5(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(rw) __src = (rw); __typeof__(__builtin_choose_expr(sizeof(rw) == 1, 
(u8)1, __builtin_choose_expr(sizeof(rw) == 2, (u16)2, __builtin_choose_expr(sizeof(rw) == 4, (u32)3, __builtin_choose_expr(sizeof(rw) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(req) __src = (req); __typeof__(__builtin_choose_expr(sizeof(req) == 1, (u8)1, __builtin_choose_expr(sizeof(req) == 2, (u16)2, __builtin_choose_expr(sizeof(req) == 4, (u32)3, __builtin_choose_expr(sizeof(req) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(work) __src = (work); __typeof__(__builtin_choose_expr(sizeof(work) == 1, (u8)1, __builtin_choose_expr(sizeof(work) == 2, (u16)2, __builtin_choose_expr(sizeof(work) == 4, (u32)3, __builtin_choose_expr(sizeof(work) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(flags) __src = (flags); __typeof__(__builtin_choose_expr(sizeof(flags) == 1, (u8)1, __builtin_choose_expr(sizeof(flags) == 2, (u16)2, __builtin_choose_expr(sizeof(flags) == 4, (u32)3, __builtin_choose_expr(sizeof(flags) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_queue_async_work(void) { check_trace_callback_type_io_uring_queue_async_work(__bpf_trace_io_uring_queue_async_work); } typedef void (*btf_trace_io_uring_queue_async_work)(void *__data, void *ctx, int rw, void * req, struct io_wq_work *work, unsigned int flags); static union { struct bpf_raw_event_map event; btf_trace_io_uring_queue_async_work handler; } __bpf_trace_tp_map_io_uring_queue_async_work __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_queue_async_work, .bpf_func = __bpf_trace_io_uring_queue_async_work, .num_args = 5, .writable_size = 0, }, };; +# 163 "./include/trace/events/io_uring.h" + ; +# 175 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_defer(void *__data, void *ctx, void *req, unsigned long long user_data) { struct bpf_prog *prog = __data; bpf_trace_run3(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(req) __src = (req); __typeof__(__builtin_choose_expr(sizeof(req) == 1, (u8)1, __builtin_choose_expr(sizeof(req) == 2, (u16)2, __builtin_choose_expr(sizeof(req) == 4, (u32)3, __builtin_choose_expr(sizeof(req) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(user_data) __src = (user_data); __typeof__(__builtin_choose_expr(sizeof(user_data) == 1, (u8)1, __builtin_choose_expr(sizeof(user_data) == 2, (u16)2, __builtin_choose_expr(sizeof(user_data) == 4, (u32)3, __builtin_choose_expr(sizeof(user_data) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_defer(void) { check_trace_callback_type_io_uring_defer(__bpf_trace_io_uring_defer); } typedef void (*btf_trace_io_uring_defer)(void *__data, void *ctx, void *req, unsigned long long user_data); static union { struct 
bpf_raw_event_map event; btf_trace_io_uring_defer handler; } __bpf_trace_tp_map_io_uring_defer __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_defer, .bpf_func = __bpf_trace_io_uring_defer, .num_args = 3, .writable_size = 0, }, };; +# 195 "./include/trace/events/io_uring.h" + ; +# 208 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_link(void *__data, void *ctx, void *req, void *target_req) { struct bpf_prog *prog = __data; bpf_trace_run3(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(req) __src = (req); __typeof__(__builtin_choose_expr(sizeof(req) == 1, (u8)1, __builtin_choose_expr(sizeof(req) == 2, (u16)2, __builtin_choose_expr(sizeof(req) == 4, (u32)3, __builtin_choose_expr(sizeof(req) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(target_req) __src = (target_req); __typeof__(__builtin_choose_expr(sizeof(target_req) == 1, (u8)1, __builtin_choose_expr(sizeof(target_req) == 2, (u16)2, __builtin_choose_expr(sizeof(target_req) == 4, (u32)3, __builtin_choose_expr(sizeof(target_req) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_link(void) { check_trace_callback_type_io_uring_link(__bpf_trace_io_uring_link); } typedef void (*btf_trace_io_uring_link)(void *__data, void *ctx, void *req, void *target_req); static union { struct bpf_raw_event_map event; btf_trace_io_uring_link handler; } __bpf_trace_tp_map_io_uring_link __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_link, .bpf_func = __bpf_trace_io_uring_link, .num_args = 3, .writable_size = 0, }, };; +# 228 "./include/trace/events/io_uring.h" + ; +# 240 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_cqring_wait(void *__data, void *ctx, int min_events) { struct bpf_prog *prog = __data; bpf_trace_run2(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(min_events) __src = (min_events); __typeof__(__builtin_choose_expr(sizeof(min_events) == 1, (u8)1, __builtin_choose_expr(sizeof(min_events) == 2, (u16)2, __builtin_choose_expr(sizeof(min_events) == 4, (u32)3, __builtin_choose_expr(sizeof(min_events) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_cqring_wait(void) { check_trace_callback_type_io_uring_cqring_wait(__bpf_trace_io_uring_cqring_wait); } typedef void (*btf_trace_io_uring_cqring_wait)(void *__data, void *ctx, int min_events); static union { struct bpf_raw_event_map event; btf_trace_io_uring_cqring_wait handler; } 
__bpf_trace_tp_map_io_uring_cqring_wait __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_cqring_wait, .bpf_func = __bpf_trace_io_uring_cqring_wait, .num_args = 2, .writable_size = 0, }, };; +# 257 "./include/trace/events/io_uring.h" + ; +# 268 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_fail_link(void *__data, void *req, void *link) { struct bpf_prog *prog = __data; bpf_trace_run2(prog, ({ typeof(req) __src = (req); __typeof__(__builtin_choose_expr(sizeof(req) == 1, (u8)1, __builtin_choose_expr(sizeof(req) == 2, (u16)2, __builtin_choose_expr(sizeof(req) == 4, (u32)3, __builtin_choose_expr(sizeof(req) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(link) __src = (link); __typeof__(__builtin_choose_expr(sizeof(link) == 1, (u8)1, __builtin_choose_expr(sizeof(link) == 2, (u16)2, __builtin_choose_expr(sizeof(link) == 4, (u32)3, __builtin_choose_expr(sizeof(link) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_fail_link(void) { check_trace_callback_type_io_uring_fail_link(__bpf_trace_io_uring_fail_link); } typedef void (*btf_trace_io_uring_fail_link)(void *__data, void *req, void *link); static union { struct bpf_raw_event_map event; btf_trace_io_uring_fail_link handler; } __bpf_trace_tp_map_io_uring_fail_link __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_fail_link, .bpf_func = __bpf_trace_io_uring_fail_link, .num_args = 2, .writable_size = 0, }, };; +# 285 "./include/trace/events/io_uring.h" + ; +# 295 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_complete(void *__data, void *ctx, u64 user_data, long res) { struct bpf_prog *prog = __data; bpf_trace_run3(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(user_data) __src = (user_data); __typeof__(__builtin_choose_expr(sizeof(user_data) == 1, (u8)1, __builtin_choose_expr(sizeof(user_data) == 2, (u16)2, __builtin_choose_expr(sizeof(user_data) == 4, (u32)3, __builtin_choose_expr(sizeof(user_data) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(res) __src = (res); __typeof__(__builtin_choose_expr(sizeof(res) == 1, (u8)1, __builtin_choose_expr(sizeof(res) == 2, (u16)2, __builtin_choose_expr(sizeof(res) == 4, (u32)3, __builtin_choose_expr(sizeof(res) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_complete(void) { check_trace_callback_type_io_uring_complete(__bpf_trace_io_uring_complete); } typedef void (*btf_trace_io_uring_complete)(void *__data, void *ctx, u64 user_data, long res); static union { struct bpf_raw_event_map event; btf_trace_io_uring_complete handler; } __bpf_trace_tp_map_io_uring_complete __attribute__((__used__)) 
__attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_complete, .bpf_func = __bpf_trace_io_uring_complete, .num_args = 3, .writable_size = 0, }, };; +# 316 "./include/trace/events/io_uring.h" + ; +# 331 "./include/trace/events/io_uring.h" +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_submit_sqe(void *__data, void *ctx, u8 opcode, u64 user_data, bool force_nonblock, bool sq_thread) { struct bpf_prog *prog = __data; bpf_trace_run5(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(opcode) __src = (opcode); __typeof__(__builtin_choose_expr(sizeof(opcode) == 1, (u8)1, __builtin_choose_expr(sizeof(opcode) == 2, (u16)2, __builtin_choose_expr(sizeof(opcode) == 4, (u32)3, __builtin_choose_expr(sizeof(opcode) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(user_data) __src = (user_data); __typeof__(__builtin_choose_expr(sizeof(user_data) == 1, (u8)1, __builtin_choose_expr(sizeof(user_data) == 2, (u16)2, __builtin_choose_expr(sizeof(user_data) == 4, (u32)3, __builtin_choose_expr(sizeof(user_data) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(force_nonblock) __src = (force_nonblock); __typeof__(__builtin_choose_expr(sizeof(force_nonblock) == 1, (u8)1, __builtin_choose_expr(sizeof(force_nonblock) == 2, (u16)2, __builtin_choose_expr(sizeof(force_nonblock) == 4, (u32)3, __builtin_choose_expr(sizeof(force_nonblock) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(sq_thread) __src = (sq_thread); __typeof__(__builtin_choose_expr(sizeof(sq_thread) == 1, (u8)1, __builtin_choose_expr(sizeof(sq_thread) == 2, (u16)2, __builtin_choose_expr(sizeof(sq_thread) == 4, (u32)3, __builtin_choose_expr(sizeof(sq_thread) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_submit_sqe(void) { check_trace_callback_type_io_uring_submit_sqe(__bpf_trace_io_uring_submit_sqe); } typedef void (*btf_trace_io_uring_submit_sqe)(void *__data, void *ctx, u8 opcode, u64 user_data, bool force_nonblock, bool sq_thread); static union { struct bpf_raw_event_map event; btf_trace_io_uring_submit_sqe handler; } __bpf_trace_tp_map_io_uring_submit_sqe __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_submit_sqe, .bpf_func = __bpf_trace_io_uring_submit_sqe, .num_args = 5, .writable_size = 0, }, };; +# 358 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_poll_arm(void *__data, void *ctx, u8 opcode, u64 user_data, int mask, int events) { struct bpf_prog *prog = __data; bpf_trace_run5(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(opcode) __src = (opcode); 
__typeof__(__builtin_choose_expr(sizeof(opcode) == 1, (u8)1, __builtin_choose_expr(sizeof(opcode) == 2, (u16)2, __builtin_choose_expr(sizeof(opcode) == 4, (u32)3, __builtin_choose_expr(sizeof(opcode) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(user_data) __src = (user_data); __typeof__(__builtin_choose_expr(sizeof(user_data) == 1, (u8)1, __builtin_choose_expr(sizeof(user_data) == 2, (u16)2, __builtin_choose_expr(sizeof(user_data) == 4, (u32)3, __builtin_choose_expr(sizeof(user_data) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(mask) __src = (mask); __typeof__(__builtin_choose_expr(sizeof(mask) == 1, (u8)1, __builtin_choose_expr(sizeof(mask) == 2, (u16)2, __builtin_choose_expr(sizeof(mask) == 4, (u32)3, __builtin_choose_expr(sizeof(mask) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(events) __src = (events); __typeof__(__builtin_choose_expr(sizeof(events) == 1, (u8)1, __builtin_choose_expr(sizeof(events) == 2, (u16)2, __builtin_choose_expr(sizeof(events) == 4, (u32)3, __builtin_choose_expr(sizeof(events) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_poll_arm(void) { check_trace_callback_type_io_uring_poll_arm(__bpf_trace_io_uring_poll_arm); } typedef void (*btf_trace_io_uring_poll_arm)(void *__data, void *ctx, u8 opcode, u64 user_data, int mask, int events); static union { struct bpf_raw_event_map event; btf_trace_io_uring_poll_arm handler; } __bpf_trace_tp_map_io_uring_poll_arm __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_poll_arm, .bpf_func = __bpf_trace_io_uring_poll_arm, .num_args = 5, .writable_size = 0, }, };; +# 386 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_poll_wake(void *__data, void *ctx, u8 opcode, u64 user_data, int mask) { struct bpf_prog *prog = __data; bpf_trace_run4(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(opcode) __src = (opcode); __typeof__(__builtin_choose_expr(sizeof(opcode) == 1, (u8)1, __builtin_choose_expr(sizeof(opcode) == 2, (u16)2, __builtin_choose_expr(sizeof(opcode) == 4, (u32)3, __builtin_choose_expr(sizeof(opcode) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(user_data) __src = (user_data); __typeof__(__builtin_choose_expr(sizeof(user_data) == 1, (u8)1, __builtin_choose_expr(sizeof(user_data) == 2, (u16)2, __builtin_choose_expr(sizeof(user_data) == 4, (u32)3, __builtin_choose_expr(sizeof(user_data) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(mask) __src = (mask); __typeof__(__builtin_choose_expr(sizeof(mask) == 1, (u8)1, __builtin_choose_expr(sizeof(mask) == 2, (u16)2, __builtin_choose_expr(sizeof(mask) == 4, (u32)3, __builtin_choose_expr(sizeof(mask) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) 
__attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_poll_wake(void) { check_trace_callback_type_io_uring_poll_wake(__bpf_trace_io_uring_poll_wake); } typedef void (*btf_trace_io_uring_poll_wake)(void *__data, void *ctx, u8 opcode, u64 user_data, int mask); static union { struct bpf_raw_event_map event; btf_trace_io_uring_poll_wake handler; } __bpf_trace_tp_map_io_uring_poll_wake __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_poll_wake, .bpf_func = __bpf_trace_io_uring_poll_wake, .num_args = 4, .writable_size = 0, }, };; +# 412 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_task_add(void *__data, void *ctx, u8 opcode, u64 user_data, int mask) { struct bpf_prog *prog = __data; bpf_trace_run4(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(opcode) __src = (opcode); __typeof__(__builtin_choose_expr(sizeof(opcode) == 1, (u8)1, __builtin_choose_expr(sizeof(opcode) == 2, (u16)2, __builtin_choose_expr(sizeof(opcode) == 4, (u32)3, __builtin_choose_expr(sizeof(opcode) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(user_data) __src = (user_data); __typeof__(__builtin_choose_expr(sizeof(user_data) == 1, (u8)1, __builtin_choose_expr(sizeof(user_data) == 2, (u16)2, __builtin_choose_expr(sizeof(user_data) == 4, (u32)3, __builtin_choose_expr(sizeof(user_data) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(mask) __src = (mask); __typeof__(__builtin_choose_expr(sizeof(mask) == 1, (u8)1, __builtin_choose_expr(sizeof(mask) == 2, (u16)2, __builtin_choose_expr(sizeof(mask) == 4, (u32)3, __builtin_choose_expr(sizeof(mask) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_task_add(void) { check_trace_callback_type_io_uring_task_add(__bpf_trace_io_uring_task_add); } typedef void (*btf_trace_io_uring_task_add)(void *__data, void *ctx, u8 opcode, u64 user_data, int mask); static union { struct bpf_raw_event_map event; btf_trace_io_uring_task_add handler; } __bpf_trace_tp_map_io_uring_task_add __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_task_add, .bpf_func = __bpf_trace_io_uring_task_add, .num_args = 4, .writable_size = 0, }, };; +# 438 "./include/trace/events/io_uring.h" + ; + +static __attribute__((no_instrument_function)) void __bpf_trace_io_uring_task_run(void *__data, void *ctx, u8 opcode, u64 user_data) { struct bpf_prog *prog = __data; bpf_trace_run3(prog, ({ typeof(ctx) __src = (ctx); __typeof__(__builtin_choose_expr(sizeof(ctx) == 1, (u8)1, __builtin_choose_expr(sizeof(ctx) == 2, (u16)2, __builtin_choose_expr(sizeof(ctx) == 4, (u32)3, __builtin_choose_expr(sizeof(ctx) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(opcode) __src = (opcode); __typeof__(__builtin_choose_expr(sizeof(opcode) == 1, (u8)1, __builtin_choose_expr(sizeof(opcode) == 2, (u16)2, 
__builtin_choose_expr(sizeof(opcode) == 4, (u32)3, __builtin_choose_expr(sizeof(opcode) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; }), ({ typeof(user_data) __src = (user_data); __typeof__(__builtin_choose_expr(sizeof(user_data) == 1, (u8)1, __builtin_choose_expr(sizeof(user_data) == 2, (u16)2, __builtin_choose_expr(sizeof(user_data) == 4, (u32)3, __builtin_choose_expr(sizeof(user_data) == 8, (u64)4, (void)5))))) __dst; memcpy(&__dst, &__src, sizeof(__dst)); (u64)__dst; })); }; static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void bpf_test_probe_io_uring_task_run(void) { check_trace_callback_type_io_uring_task_run(__bpf_trace_io_uring_task_run); } typedef void (*btf_trace_io_uring_task_run)(void *__data, void *ctx, u8 opcode, u64 user_data); static union { struct bpf_raw_event_map event; btf_trace_io_uring_task_run handler; } __bpf_trace_tp_map_io_uring_task_run __attribute__((__used__)) __attribute__((section("__bpf_raw_tp_map"))) = { .event = { .tp = &__tracepoint_io_uring_task_run, .bpf_func = __bpf_trace_io_uring_task_run, .num_args = 3, .writable_size = 0, }, };;
+# 461 "./include/trace/events/io_uring.h"
+ ;
+
+
+
+
+# 1 "./include/trace/define_trace.h" 1
+# 466 "./include/trace/events/io_uring.h" 2
+# 115 "./include/trace/bpf_probe.h" 2
+# 105 "./include/trace/define_trace.h" 2
+# 466 "./include/trace/events/io_uring.h" 2
+# 85 "fs/io_uring.c" 2
+
+# 1 "./include/uapi/linux/io_uring.h" 1
+# 17 "./include/uapi/linux/io_uring.h"
+struct io_uring_sqe {
+ __u8 opcode;
+ __u8 flags;
+ __u16 ioprio;
+ __s32 fd;
+ union {
+ __u64 off;
+ __u64 addr2;
+ };
+ union {
+ __u64 addr;
+ __u64 splice_off_in;
+ };
+ __u32 len;
+ union {
+ __kernel_rwf_t rw_flags;
+ __u32 fsync_flags;
+ __u16 poll_events;
+ __u32 poll32_events;
+ __u32 sync_range_flags;
+ __u32 msg_flags;
+ __u32 timeout_flags;
+ __u32 accept_flags;
+ __u32 cancel_flags;
+ __u32 open_flags;
+ __u32 statx_flags;
+ __u32 fadvise_advice;
+ __u32 splice_flags;
+ };
+ __u64 user_data;
+ union {
+ struct {
+
+ union {
+
+ __u16 buf_index;
+
+ __u16 buf_group;
+ } __attribute__((packed));
+
+ __u16 personality;
+ __s32 splice_fd_in;
+ };
+ __u64 __pad2[3];
+ };
+};
+
+enum {
+ IOSQE_FIXED_FILE_BIT,
+ IOSQE_IO_DRAIN_BIT,
+ IOSQE_IO_LINK_BIT,
+ IOSQE_IO_HARDLINK_BIT,
+ IOSQE_ASYNC_BIT,
+ IOSQE_BUFFER_SELECT_BIT,
+};
+# 99 "./include/uapi/linux/io_uring.h"
+enum {
+ IORING_OP_NOP,
+ IORING_OP_READV,
+ IORING_OP_WRITEV,
+ IORING_OP_FSYNC,
+ IORING_OP_READ_FIXED,
+ IORING_OP_WRITE_FIXED,
+ IORING_OP_POLL_ADD,
+ IORING_OP_POLL_REMOVE,
+ IORING_OP_SYNC_FILE_RANGE,
+ IORING_OP_SENDMSG,
+ IORING_OP_RECVMSG,
+ IORING_OP_TIMEOUT,
+ IORING_OP_TIMEOUT_REMOVE,
+ IORING_OP_ACCEPT,
+ IORING_OP_ASYNC_CANCEL,
+ IORING_OP_LINK_TIMEOUT,
+ IORING_OP_CONNECT,
+ IORING_OP_FALLOCATE,
+ IORING_OP_OPENAT,
+ IORING_OP_CLOSE,
+ IORING_OP_FILES_UPDATE,
+ IORING_OP_STATX,
+ IORING_OP_READ,
+ IORING_OP_WRITE,
+ IORING_OP_FADVISE,
+ IORING_OP_MADVISE,
+ IORING_OP_SEND,
+ IORING_OP_RECV,
+ IORING_OP_OPENAT2,
+ IORING_OP_EPOLL_CTL,
+ IORING_OP_SPLICE,
+ IORING_OP_PROVIDE_BUFFERS,
+ IORING_OP_REMOVE_BUFFERS,
+ IORING_OP_TEE,
+
+
+ IORING_OP_LAST,
+};
+# 158 "./include/uapi/linux/io_uring.h"
+struct io_uring_cqe {
+ __u64 user_data;
+ __s32 res;
+ __u32 flags;
+};
+# 171 "./include/uapi/linux/io_uring.h"
+enum {
+ IORING_CQE_BUFFER_SHIFT = 16,
+};
+# 185 "./include/uapi/linux/io_uring.h"
+struct io_sqring_offsets {
+ __u32 head;
+ __u32 tail;
+ __u32 ring_mask;
+ __u32 ring_entries;
+ __u32 flags;
+ __u32 dropped;
+ __u32 array;
+ __u32 resv1;
+ __u64 resv2;
+};
+
+
+
+
+
+
+struct io_cqring_offsets {
+ __u32 head;
+ __u32 tail;
+ __u32 ring_mask;
+ __u32 ring_entries;
+ __u32 overflow;
+ __u32 cqes;
+ __u32 flags;
+ __u32 resv1;
+ __u64 resv2;
+};
+# 230 "./include/uapi/linux/io_uring.h"
+struct io_uring_params {
+ __u32 sq_entries;
+ __u32 cq_entries;
+ __u32 flags;
+ __u32 sq_thread_cpu;
+ __u32 sq_thread_idle;
+ __u32 features;
+ __u32 wq_fd;
+ __u32 resv[3];
+ struct io_sqring_offsets sq_off;
+ struct io_cqring_offsets cq_off;
+};
+# 269 "./include/uapi/linux/io_uring.h"
+struct io_uring_files_update {
+ __u32 offset;
+ __u32 resv;
+ __u64 __attribute__((aligned(8))) fds;
+};
+
+
+
+struct io_uring_probe_op {
+ __u8 op;
+ __u8 resv;
+ __u16 flags;
+ __u32 resv2;
+};
+
+struct io_uring_probe {
+ __u8 last_op;
+ __u8 ops_len;
+ __u16 resv;
+ __u32 resv2[3];
+ struct io_uring_probe_op ops[0];
+};
+# 87 "fs/io_uring.c" 2
+
+# 1 "fs/internal.h" 1
+
+
+
+
+
+
+
+struct super_block;
+struct file_system_type;
+struct iomap;
+struct iomap_ops;
+struct linux_binprm;
+struct path;
+struct mount;
+struct shrink_control;
+struct fs_context;
+struct user_namespace;
+
+
+
+
+
+extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) bdev_cache_init(void);
+
+extern int __sync_blockdev(struct block_device *bdev, int wait);
+# 41 "fs/internal.h"
+extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block, struct iomap *iomap);
+
+
+
+
+extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) chrdev_init(void);
+
+
+
+
+extern const struct fs_context_operations legacy_fs_context_ops;
+extern int parse_monolithic_mount_data(struct fs_context *, void *);
+extern void fc_drop_locked(struct fs_context *);
+extern void vfs_clean_context(struct fs_context *fc);
+extern int finish_clean_context(struct fs_context *fc);
+
+
+
+
+extern int filename_lookup(int dfd, struct filename *name, unsigned flags,
+ struct path *path, struct path *root);
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+ const char *, unsigned int, struct path *);
+long do_mknodat(int dfd, const char *filename, umode_t mode,
+ unsigned int dev);
+long do_mkdirat(int dfd, const char *pathname, umode_t mode);
+long do_rmdir(int dfd, const char *pathname);
+long do_unlinkat(int dfd, struct filename *name);
+long do_symlinkat(const char *oldname, int newdfd,
+ const char *newname);
+int do_linkat(int olddfd, const char *oldname, int newdfd,
+ const char *newname, int flags);
+
+
+
+
+extern void *copy_mount_options(const void *);
+extern char *copy_mount_string(const void *);
+
+extern struct vfsmount *lookup_mnt(const struct path *);
+extern int finish_automount(struct vfsmount *, struct path *);
+
+extern int sb_prepare_remount_readonly(struct super_block *);
+
+extern void __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) mnt_init(void);
+
+extern int __mnt_want_write_file(struct file *);
+extern void __mnt_drop_write_file(struct file *);
+
+extern void dissolve_on_fput(struct vfsmount *);
+
+
+
+extern void chroot_fs_refs(const struct path *, const struct path *);
+
+
+
+
+extern struct file *alloc_empty_file(int, const struct cred *);
+extern struct file *alloc_empty_file_noaccount(int, const struct cred *);
+
+
+
+
+extern int reconfigure_super(struct fs_context *);
+extern bool trylock_super(struct super_block *sb);
+extern struct super_block *user_get_super(dev_t);
+extern bool mount_capable(struct fs_context *);
+
+
+
+
+struct open_flags {
+ int open_flag;
+ umode_t mode;
+ int acc_mode;
+ int intent;
+ int lookup_flags;
+};
+extern struct file *do_filp_open(int dfd, struct filename *pathname,
+ const struct open_flags *op);
+extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
+ const char *, const struct open_flags *);
+extern struct open_how build_open_how(int flags, umode_t mode);
+extern int build_open_flags(const struct open_how *how, struct open_flags *op);
+
+long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+int do_fchmodat(int dfd, const char *filename, umode_t mode);
+int do_fchownat(int dfd, const char *filename, uid_t user, gid_t group,
+ int flag);
+
+extern int vfs_open(const struct path *, struct file *);
+
+
+
+
+extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
+extern void inode_add_lru(struct inode *inode);
+extern int dentry_needs_remove_privs(struct dentry *dentry);
+
+
+
+
+extern long get_nr_dirty_inodes(void);
+extern int invalidate_inodes(struct super_block *, bool);
+
+
+
+
+extern int d_set_mounted(struct dentry *dentry);
+extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
+extern struct dentry *d_alloc_cursor(struct dentry *);
+extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+extern char *simple_dname(struct dentry *, char *, int);
+extern void dput_to_list(struct dentry *, struct list_head *);
+extern void shrink_dentry_list(struct list_head *);
+
+
+
+
+extern int rw_verify_area(int, struct file *, const loff_t *, size_t);
+
+
+
+
+extern const struct file_operations pipefifo_fops;
+
+
+
+
+extern void group_pin_kill(struct hlist_head *p);
+extern void mnt_pin_kill(struct mount *m);
+
+
+
+
+extern const struct dentry_operations ns_dentry_operations;
+
+
+int sb_init_dio_done_wq(struct super_block *sb);
+
+
+
+
+int do_statx(int dfd, const char *filename, unsigned flags,
+ unsigned int mask, struct statx *buffer);
+# 89 "fs/io_uring.c" 2
+# 1 "fs/io-wq.h" 1
+
+
+
+struct io_wq;
+
+enum {
+ IO_WQ_WORK_CANCEL = 1,
+ IO_WQ_WORK_HASHED = 2,
+ IO_WQ_WORK_UNBOUND = 4,
+ IO_WQ_WORK_NO_CANCEL = 8,
+ IO_WQ_WORK_CONCURRENT = 16,
+
+ IO_WQ_HASH_SHIFT = 24,
+};
+
+enum io_wq_cancel {
+ IO_WQ_CANCEL_OK,
+ IO_WQ_CANCEL_RUNNING,
+ IO_WQ_CANCEL_NOTFOUND,
+};
+
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wq_list_add_after(struct io_wq_work_node *node,
+ struct io_wq_work_node *pos,
+ struct io_wq_work_list *list)
+{
+ struct io_wq_work_node *next = pos->next;
+
+ pos->next = node;
+ node->next = next;
+ if (!next)
+ list->last = node;
+}
+
+static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wq_list_add_tail(struct io_wq_work_node *node,
+ struct io_wq_work_list *list)
+{
+ if (!list->first) {
+ list->last = node;
+ do { do { extern void __compiletime_assert_1697(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list->first) == sizeof(char) || sizeof(list->first) == sizeof(short) || sizeof(list->first) == sizeof(int) ||
sizeof(list->first) == sizeof(long)) || sizeof(list->first) == sizeof(long long))) __compiletime_assert_1697(); } while (0); do { *(volatile typeof(list->first) *)&(list->first) = (node); } while (0); } while (0); + } else { + list->last->next = node; + list->last = node; + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wq_list_cut(struct io_wq_work_list *list, + struct io_wq_work_node *last, + struct io_wq_work_node *prev) +{ + + if (!prev) + do { do { extern void __compiletime_assert_1698(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(list->first) == sizeof(char) || sizeof(list->first) == sizeof(short) || sizeof(list->first) == sizeof(int) || sizeof(list->first) == sizeof(long)) || sizeof(list->first) == sizeof(long long))) __compiletime_assert_1698(); } while (0); do { *(volatile typeof(list->first) *)&(list->first) = (last->next); } while (0); } while (0); + else + prev->next = last->next; + + if (last == list->last) + list->last = prev; + last->next = ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void wq_list_del(struct io_wq_work_list *list, + struct io_wq_work_node *node, + struct io_wq_work_node *prev) +{ + wq_list_cut(list, node, prev); +} +# 86 "fs/io-wq.h" +struct io_wq_work { + struct io_wq_work_node list; + struct files_struct *files; + struct mm_struct *mm; + const struct cred *creds; + struct fs_struct *fs; + unsigned flags; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct io_wq_work *wq_next_work(struct io_wq_work *work) +{ + if (!work->list.next) + return ((void *)0); + + return ({ void *__mptr = (void *)(work->list.next); do { extern void __compiletime_assert_1699(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work->list.next)), typeof(((struct io_wq_work *)0)->list)) && !__builtin_types_compatible_p(typeof(*(work->list.next)), typeof(void))))) __compiletime_assert_1699(); } while (0); ((struct io_wq_work *)(__mptr - __builtin_offsetof(struct io_wq_work, list))); }); +} + +typedef void (free_work_fn)(struct io_wq_work *); +typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *); + +struct io_wq_data { + struct user_struct *user; + + io_wq_work_fn *do_work; + free_work_fn *free_work; +}; + +struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data); +bool io_wq_get(struct io_wq *wq, struct io_wq_data *data); +void io_wq_destroy(struct io_wq *wq); + +void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work); +void io_wq_hash_work(struct io_wq_work *work, void *val); + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool io_wq_is_hashed(struct io_wq_work *work) +{ + return work->flags & IO_WQ_WORK_HASHED; +} + +void io_wq_cancel_all(struct io_wq *wq); +enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork); + +typedef bool (work_cancel_fn)(struct io_wq_work *, void *); + +enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, + void *data, bool cancel_all); + +struct task_struct *io_wq_get_task(struct io_wq *wq); + + +extern void io_wq_worker_sleeping(struct task_struct *); +extern void io_wq_worker_running(struct task_struct *); +# 147 "fs/io-wq.h" +static inline 
__attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool io_wq_current_is_worker(void) +{ + return (!(preempt_count() & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4)) | (((1UL << (4))-1) << ((0 + 8) + 8)) | (1UL << (0 + 8))))) && (get_current()->flags & 0x20000000); +} +# 90 "fs/io_uring.c" 2 +# 102 "fs/io_uring.c" +struct io_uring { + u32 head __attribute__((__aligned__((1 << (6))))); + u32 tail __attribute__((__aligned__((1 << (6))))); +}; +# 114 "fs/io_uring.c" +struct io_rings { +# 123 "fs/io_uring.c" + struct io_uring sq, cq; + + + + + u32 sq_ring_mask, cq_ring_mask; + + u32 sq_ring_entries, cq_ring_entries; +# 143 "fs/io_uring.c" + u32 sq_dropped; +# 153 "fs/io_uring.c" + u32 sq_flags; + + + + + + + u32 cq_flags; +# 174 "fs/io_uring.c" + u32 cq_overflow; + + + + + + + + struct io_uring_cqe cqes[] __attribute__((__aligned__((1 << (6))))); +}; + +struct io_mapped_ubuf { + u64 ubuf; + size_t len; + struct bio_vec *bvec; + unsigned int nr_bvecs; +}; + +struct fixed_file_table { + struct file **files; +}; + +struct fixed_file_ref_node { + struct percpu_ref refs; + struct list_head node; + struct list_head file_list; + struct fixed_file_data *file_data; + struct llist_node llist; +}; + +struct fixed_file_data { + struct fixed_file_table *table; + struct io_ring_ctx *ctx; + + struct percpu_ref *cur_refs; + struct percpu_ref refs; + struct completion done; + struct list_head ref_list; + spinlock_t lock; +}; + +struct io_buffer { + struct list_head list; + __u64 addr; + __s32 len; + __u16 bid; +}; + +struct io_ring_ctx { + struct { + struct percpu_ref refs; + } __attribute__((__aligned__((1 << (6))))); + + struct { + unsigned int flags; + unsigned int compat: 1; + unsigned int limit_mem: 1; + unsigned int cq_overflow_flushed: 1; + unsigned int drain_next: 1; + unsigned int eventfd_async: 1; +# 246 "fs/io_uring.c" + u32 *sq_array; + unsigned cached_sq_head; + unsigned sq_entries; + unsigned sq_mask; + unsigned sq_thread_idle; + unsigned cached_sq_dropped; + atomic_t cached_cq_overflow; + unsigned long sq_check_overflow; + + struct list_head defer_list; + struct list_head timeout_list; + struct list_head cq_overflow_list; + + wait_queue_head_t inflight_wait; + struct io_uring_sqe *sq_sqes; + } __attribute__((__aligned__((1 << (6))))); + + struct io_rings *rings; + + + struct io_wq *io_wq; + struct task_struct *sqo_thread; + struct mm_struct *sqo_mm; + wait_queue_head_t sqo_wait; + + + + + + + struct fixed_file_data *file_data; + unsigned nr_user_files; + int ring_fd; + struct file *ring_file; + + + unsigned nr_user_bufs; + struct io_mapped_ubuf *user_bufs; + + struct user_struct *user; + + const struct cred *creds; + + struct completion ref_comp; + struct completion sq_thread_comp; + + + struct io_kiocb *fallback_req; + + + struct socket *ring_sock; + + + struct idr io_buffer_idr; + + struct idr personality_idr; + + struct { + unsigned cached_cq_tail; + unsigned cq_entries; + unsigned cq_mask; + atomic_t cq_timeouts; + unsigned long cq_check_overflow; + struct wait_queue_head cq_wait; + struct fasync_struct *cq_fasync; + struct eventfd_ctx *cq_ev_fd; + } __attribute__((__aligned__((1 << (6))))); + + struct { + struct mutex uring_lock; + wait_queue_head_t wait; + } __attribute__((__aligned__((1 << (6))))); + + struct { + spinlock_t completion_lock; + + + + + + + + struct list_head poll_list; + struct hlist_head *cancel_hash; + unsigned cancel_hash_bits; + bool poll_multi_file; + + spinlock_t inflight_lock; + struct list_head inflight_list; + } 
__attribute__((__aligned__((1 << (6))))); + + struct delayed_work file_put_work; + struct llist_head file_put_llist; + + struct work_struct exit_work; +}; + + + + + +struct io_poll_iocb { + struct file *file; + union { + struct wait_queue_head *head; + u64 addr; + }; + __poll_t events; + bool done; + bool canceled; + struct wait_queue_entry wait; +}; + +struct io_close { + struct file *file; + struct file *put_file; + int fd; +}; + +struct io_timeout_data { + struct io_kiocb *req; + struct hrtimer timer; + struct timespec64 ts; + enum hrtimer_mode mode; +}; + +struct io_accept { + struct file *file; + struct sockaddr *addr; + int *addr_len; + int flags; + unsigned long nofile; +}; + +struct io_sync { + struct file *file; + loff_t len; + loff_t off; + int flags; + int mode; +}; + +struct io_cancel { + struct file *file; + u64 addr; +}; + +struct io_timeout { + struct file *file; + u64 addr; + int flags; + u32 off; + u32 target_seq; +}; + +struct io_rw { + + struct kiocb kiocb; + u64 addr; + u64 len; +}; + +struct io_connect { + struct file *file; + struct sockaddr *addr; + int addr_len; +}; + +struct io_sr_msg { + struct file *file; + union { + struct user_msghdr *msg; + void *buf; + }; + int msg_flags; + int bgid; + size_t len; + struct io_buffer *kbuf; +}; + +struct io_open { + struct file *file; + int dfd; + struct filename *filename; + struct open_how how; + unsigned long nofile; +}; + +struct io_files_update { + struct file *file; + u64 arg; + u32 nr_args; + u32 offset; +}; + +struct io_fadvise { + struct file *file; + u64 offset; + u32 len; + u32 advice; +}; + +struct io_madvise { + struct file *file; + u64 addr; + u32 len; + u32 advice; +}; + +struct io_epoll { + struct file *file; + int epfd; + int op; + int fd; + struct epoll_event event; +}; + +struct io_splice { + struct file *file_out; + struct file *file_in; + loff_t off_out; + loff_t off_in; + u64 len; + unsigned int flags; +}; + +struct io_provide_buf { + struct file *file; + __u64 addr; + __s32 len; + __u32 bgid; + __u16 nbufs; + __u16 bid; +}; + +struct io_statx { + struct file *file; + int dfd; + unsigned int mask; + unsigned int flags; + const char *filename; + struct statx *buffer; +}; + +struct io_async_connect { + struct __kernel_sockaddr_storage address; +}; + +struct io_async_msghdr { + struct iovec fast_iov[8]; + struct iovec *iov; + struct sockaddr *uaddr; + struct msghdr msg; + struct __kernel_sockaddr_storage addr; +}; + +struct io_async_rw { + struct iovec fast_iov[8]; + struct iovec *iov; + ssize_t nr_segs; + ssize_t size; + struct wait_page_queue wpq; + struct callback_head task_work; +}; + +struct io_async_ctx { + union { + struct io_async_rw rw; + struct io_async_msghdr msg; + struct io_async_connect connect; + struct io_timeout_data timeout; + }; +}; + +enum { + REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT, + REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT, + REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT, + REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT, + REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, + REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, + + REQ_F_LINK_HEAD_BIT, + REQ_F_FAIL_LINK_BIT, + REQ_F_INFLIGHT_BIT, + REQ_F_CUR_POS_BIT, + REQ_F_NOWAIT_BIT, + REQ_F_LINK_TIMEOUT_BIT, + REQ_F_ISREG_BIT, + REQ_F_COMP_LOCKED_BIT, + REQ_F_NEED_CLEANUP_BIT, + REQ_F_OVERFLOW_BIT, + REQ_F_POLLED_BIT, + REQ_F_BUFFER_SELECTED_BIT, + REQ_F_NO_FILE_TABLE_BIT, + REQ_F_WORK_INITIALIZED_BIT, + REQ_F_TASK_PINNED_BIT, + + + __REQ_F_LAST_BIT, +}; + +enum { + + REQ_F_FIXED_FILE = ((((1UL))) << (REQ_F_FIXED_FILE_BIT)), + + REQ_F_IO_DRAIN = ((((1UL))) 
<< (REQ_F_IO_DRAIN_BIT)), + + REQ_F_LINK = ((((1UL))) << (REQ_F_LINK_BIT)), + + REQ_F_HARDLINK = ((((1UL))) << (REQ_F_HARDLINK_BIT)), + + REQ_F_FORCE_ASYNC = ((((1UL))) << (REQ_F_FORCE_ASYNC_BIT)), + + REQ_F_BUFFER_SELECT = ((((1UL))) << (REQ_F_BUFFER_SELECT_BIT)), + + + REQ_F_LINK_HEAD = ((((1UL))) << (REQ_F_LINK_HEAD_BIT)), + + REQ_F_FAIL_LINK = ((((1UL))) << (REQ_F_FAIL_LINK_BIT)), + + REQ_F_INFLIGHT = ((((1UL))) << (REQ_F_INFLIGHT_BIT)), + + REQ_F_CUR_POS = ((((1UL))) << (REQ_F_CUR_POS_BIT)), + + REQ_F_NOWAIT = ((((1UL))) << (REQ_F_NOWAIT_BIT)), + + REQ_F_LINK_TIMEOUT = ((((1UL))) << (REQ_F_LINK_TIMEOUT_BIT)), + + REQ_F_ISREG = ((((1UL))) << (REQ_F_ISREG_BIT)), + + REQ_F_COMP_LOCKED = ((((1UL))) << (REQ_F_COMP_LOCKED_BIT)), + + REQ_F_NEED_CLEANUP = ((((1UL))) << (REQ_F_NEED_CLEANUP_BIT)), + + REQ_F_OVERFLOW = ((((1UL))) << (REQ_F_OVERFLOW_BIT)), + + REQ_F_POLLED = ((((1UL))) << (REQ_F_POLLED_BIT)), + + REQ_F_BUFFER_SELECTED = ((((1UL))) << (REQ_F_BUFFER_SELECTED_BIT)), + + REQ_F_NO_FILE_TABLE = ((((1UL))) << (REQ_F_NO_FILE_TABLE_BIT)), + + REQ_F_WORK_INITIALIZED = ((((1UL))) << (REQ_F_WORK_INITIALIZED_BIT)), + + REQ_F_TASK_PINNED = ((((1UL))) << (REQ_F_TASK_PINNED_BIT)), +}; + +struct async_poll { + struct io_poll_iocb poll; + struct io_wq_work work; +}; + + + + + + + +struct io_kiocb { + union { + struct file *file; + struct io_rw rw; + struct io_poll_iocb poll; + struct io_accept accept; + struct io_sync sync; + struct io_cancel cancel; + struct io_timeout timeout; + struct io_connect connect; + struct io_sr_msg sr_msg; + struct io_open open; + struct io_close close; + struct io_files_update files_update; + struct io_fadvise fadvise; + struct io_madvise madvise; + struct io_epoll epoll; + struct io_splice splice; + struct io_provide_buf pbuf; + struct io_statx statx; + }; + + struct io_async_ctx *io; + int cflags; + u8 opcode; + + u8 iopoll_completed; + + u16 buf_index; + + struct io_ring_ctx *ctx; + struct list_head list; + unsigned int flags; + refcount_t refs; + struct task_struct *task; + unsigned long fsize; + u64 user_data; + u32 result; + u32 sequence; + + struct list_head link_list; + + struct list_head inflight_entry; + + struct percpu_ref *fixed_file_refs; + + union { + + + + + + + struct { + struct hlist_node hash_node; + struct async_poll *apoll; + }; + struct io_wq_work work; + }; + struct callback_head task_work; +}; + + + +struct io_comp_state { + unsigned int nr; + struct list_head list; + struct io_ring_ctx *ctx; +}; + +struct io_submit_state { + struct blk_plug plug; + + + + + void *reqs[8]; + unsigned int free_reqs; + + + + + struct io_comp_state comp; + + + + + struct file *file; + unsigned int fd; + unsigned int has_refs; + unsigned int used_refs; + unsigned int ios_left; +}; + +struct io_op_def { + + unsigned async_ctx : 1; + + unsigned needs_mm : 1; + + unsigned needs_file : 1; + + unsigned needs_file_no_error : 1; + + unsigned hash_reg_file : 1; + + unsigned unbound_nonreg_file : 1; + + unsigned not_supported : 1; + + unsigned file_table : 1; + + unsigned needs_fs : 1; + + unsigned pollin : 1; + unsigned pollout : 1; + + unsigned buffer_select : 1; +}; + +static const struct io_op_def io_op_defs[] = { + [IORING_OP_NOP] = {}, + [IORING_OP_READV] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollin = 1, + .buffer_select = 1, + }, + [IORING_OP_WRITEV] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_FSYNC] = { + .needs_file = 1, 
+ }, + [IORING_OP_READ_FIXED] = { + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollin = 1, + }, + [IORING_OP_WRITE_FIXED] = { + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_POLL_ADD] = { + .needs_file = 1, + .unbound_nonreg_file = 1, + }, + [IORING_OP_POLL_REMOVE] = {}, + [IORING_OP_SYNC_FILE_RANGE] = { + .needs_file = 1, + }, + [IORING_OP_SENDMSG] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .needs_fs = 1, + .pollout = 1, + }, + [IORING_OP_RECVMSG] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .needs_fs = 1, + .pollin = 1, + .buffer_select = 1, + }, + [IORING_OP_TIMEOUT] = { + .async_ctx = 1, + .needs_mm = 1, + }, + [IORING_OP_TIMEOUT_REMOVE] = {}, + [IORING_OP_ACCEPT] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .file_table = 1, + .pollin = 1, + }, + [IORING_OP_ASYNC_CANCEL] = {}, + [IORING_OP_LINK_TIMEOUT] = { + .async_ctx = 1, + .needs_mm = 1, + }, + [IORING_OP_CONNECT] = { + .async_ctx = 1, + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_FALLOCATE] = { + .needs_file = 1, + }, + [IORING_OP_OPENAT] = { + .file_table = 1, + .needs_fs = 1, + }, + [IORING_OP_CLOSE] = { + .needs_file = 1, + .needs_file_no_error = 1, + .file_table = 1, + }, + [IORING_OP_FILES_UPDATE] = { + .needs_mm = 1, + .file_table = 1, + }, + [IORING_OP_STATX] = { + .needs_mm = 1, + .needs_fs = 1, + .file_table = 1, + }, + [IORING_OP_READ] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollin = 1, + .buffer_select = 1, + }, + [IORING_OP_WRITE] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_FADVISE] = { + .needs_file = 1, + }, + [IORING_OP_MADVISE] = { + .needs_mm = 1, + }, + [IORING_OP_SEND] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollout = 1, + }, + [IORING_OP_RECV] = { + .needs_mm = 1, + .needs_file = 1, + .unbound_nonreg_file = 1, + .pollin = 1, + .buffer_select = 1, + }, + [IORING_OP_OPENAT2] = { + .file_table = 1, + .needs_fs = 1, + }, + [IORING_OP_EPOLL_CTL] = { + .unbound_nonreg_file = 1, + .file_table = 1, + }, + [IORING_OP_SPLICE] = { + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + }, + [IORING_OP_PROVIDE_BUFFERS] = {}, + [IORING_OP_REMOVE_BUFFERS] = {}, + [IORING_OP_TEE] = { + .needs_file = 1, + .hash_reg_file = 1, + .unbound_nonreg_file = 1, + }, +}; + +enum io_mem_account { + ACCT_LOCKED, + ACCT_PINNED, +}; + +static bool io_rw_reissue(struct io_kiocb *req, long res); +static void io_cqring_fill_event(struct io_kiocb *req, long res); +static void io_put_req(struct io_kiocb *req); +static void io_double_put_req(struct io_kiocb *req); +static void __io_double_put_req(struct io_kiocb *req); +static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req); +static void io_queue_linked_timeout(struct io_kiocb *req); +static int __io_sqe_files_update(struct io_ring_ctx *ctx, + struct io_uring_files_update *ip, + unsigned nr_args); +static int io_grab_files(struct io_kiocb *req); +static void io_complete_rw_common(struct kiocb *kiocb, long res, + struct io_comp_state *cs); +static void io_cleanup_req(struct io_kiocb *req); +static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, + int fd, struct file **out_file, bool fixed); +static void __io_queue_sqe(struct io_kiocb *req, + const struct io_uring_sqe *sqe, + struct io_comp_state *cs); + 
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct iov_iter *iter, + bool needs_lock); +static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size, + struct iovec *iovec, struct iovec *fast_iov, + struct iov_iter *iter); + +static struct kmem_cache *req_cachep; + +static const struct file_operations io_uring_fops; + +struct sock *io_uring_get_socket(struct file *file) +{ + + if (file->f_op == &io_uring_fops) { + struct io_ring_ctx *ctx = file->private_data; + + return ctx->ring_sock->sk; + } + + return ((void *)0); +} +extern typeof(io_uring_get_socket) io_uring_get_socket; extern const char __kstrtab_io_uring_get_socket[]; extern const char __kstrtabns_io_uring_get_socket[]; asm(" .section \"___kcrctab" "" "+" "io_uring_get_socket" "\", \"a\" \n" " .weak __crc_" "io_uring_get_socket" " \n" " .long __crc_" "io_uring_get_socket" " \n" " .previous \n"); asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" "__kstrtab_" "io_uring_get_socket" ": \n" " .asciz \"" "io_uring_get_socket" "\" \n" "__kstrtabns_" "io_uring_get_socket" ": \n" " .asciz \"" "" "\" \n" " .previous \n"); static void * __attribute__((__section__(".discard.addressable"))) __attribute__((__used__)) __addressable_io_uring_get_socket928 = (void *)&io_uring_get_socket; asm(" .section \"___ksymtab" "" "+" "io_uring_get_socket" "\", \"a\" \n" " .balign 4 \n" "__ksymtab_" "io_uring_get_socket" ": \n" " .long " "io_uring_get_socket" "- . \n" " .long __kstrtab_" "io_uring_get_socket" "- . \n" " .long __kstrtabns_" "io_uring_get_socket" "- . \n" " .previous \n"); + +static void io_get_req_task(struct io_kiocb *req) +{ + if (req->flags & REQ_F_TASK_PINNED) + return; + get_task_struct(req->task); + req->flags |= REQ_F_TASK_PINNED; +} + + +static void __io_put_req_task(struct io_kiocb *req) +{ + if (req->flags & REQ_F_TASK_PINNED) + put_task_struct(req->task); +} + +static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx) +{ + struct mm_struct *mm = get_current()->mm; + + if (mm) { + kthread_unuse_mm(mm); + mmput(mm); + } +} + +static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) +{ + if (!get_current()->mm) { + if (__builtin_expect(!!(!ctx->sqo_mm || !mmget_not_zero(ctx->sqo_mm)), 0)) + return -14; + kthread_use_mm(ctx->sqo_mm); + } + + return 0; +} + +static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx, + struct io_kiocb *req) +{ + if (!io_op_defs[req->opcode].needs_mm) + return 0; + return __io_sq_thread_acquire_mm(ctx); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void req_set_fail_links(struct io_kiocb *req) +{ + if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; +} + +static void io_file_put_work(struct work_struct *work); + + + + + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void io_req_init_async(struct io_kiocb *req) +{ + if (req->flags & REQ_F_WORK_INITIALIZED) + return; + + memset(&req->work, 0, sizeof(req->work)); + req->flags |= REQ_F_WORK_INITIALIZED; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool io_async_submit(struct io_ring_ctx *ctx) +{ + return ctx->flags & (1U << 1); +} + +static void io_ring_ctx_ref_free(struct percpu_ref *ref) +{ + struct io_ring_ctx *ctx = ({ void *__mptr = (void *)(ref); do { extern void __compiletime_assert_1700(void) __attribute__((__error__("pointer type 
mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(ref)), typeof(((struct io_ring_ctx *)0)->refs)) && !__builtin_types_compatible_p(typeof(*(ref)), typeof(void))))) __compiletime_assert_1700(); } while (0); ((struct io_ring_ctx *)(__mptr - __builtin_offsetof(struct io_ring_ctx, refs))); }); + + complete(&ctx->ref_comp); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool io_is_timeout_noseq(struct io_kiocb *req) +{ + return !req->timeout.off; +} + +static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) +{ + struct io_ring_ctx *ctx; + int hash_bits; + + ctx = kzalloc(sizeof(*ctx), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!ctx) + return ((void *)0); + + ctx->fallback_req = kmem_cache_alloc(req_cachep, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!ctx->fallback_req) + goto err; + + + + + + hash_bits = ( __builtin_constant_p(p->cq_entries) ? ( __builtin_constant_p(p->cq_entries) ? ( (p->cq_entries) < 2 ? 0 : (p->cq_entries) & (1ULL << 63) ? 63 : (p->cq_entries) & (1ULL << 62) ? 62 : (p->cq_entries) & (1ULL << 61) ? 61 : (p->cq_entries) & (1ULL << 60) ? 60 : (p->cq_entries) & (1ULL << 59) ? 59 : (p->cq_entries) & (1ULL << 58) ? 58 : (p->cq_entries) & (1ULL << 57) ? 57 : (p->cq_entries) & (1ULL << 56) ? 56 : (p->cq_entries) & (1ULL << 55) ? 55 : (p->cq_entries) & (1ULL << 54) ? 54 : (p->cq_entries) & (1ULL << 53) ? 53 : (p->cq_entries) & (1ULL << 52) ? 52 : (p->cq_entries) & (1ULL << 51) ? 51 : (p->cq_entries) & (1ULL << 50) ? 50 : (p->cq_entries) & (1ULL << 49) ? 49 : (p->cq_entries) & (1ULL << 48) ? 48 : (p->cq_entries) & (1ULL << 47) ? 47 : (p->cq_entries) & (1ULL << 46) ? 46 : (p->cq_entries) & (1ULL << 45) ? 45 : (p->cq_entries) & (1ULL << 44) ? 44 : (p->cq_entries) & (1ULL << 43) ? 43 : (p->cq_entries) & (1ULL << 42) ? 42 : (p->cq_entries) & (1ULL << 41) ? 41 : (p->cq_entries) & (1ULL << 40) ? 40 : (p->cq_entries) & (1ULL << 39) ? 39 : (p->cq_entries) & (1ULL << 38) ? 38 : (p->cq_entries) & (1ULL << 37) ? 37 : (p->cq_entries) & (1ULL << 36) ? 36 : (p->cq_entries) & (1ULL << 35) ? 35 : (p->cq_entries) & (1ULL << 34) ? 34 : (p->cq_entries) & (1ULL << 33) ? 33 : (p->cq_entries) & (1ULL << 32) ? 32 : (p->cq_entries) & (1ULL << 31) ? 31 : (p->cq_entries) & (1ULL << 30) ? 30 : (p->cq_entries) & (1ULL << 29) ? 29 : (p->cq_entries) & (1ULL << 28) ? 28 : (p->cq_entries) & (1ULL << 27) ? 27 : (p->cq_entries) & (1ULL << 26) ? 26 : (p->cq_entries) & (1ULL << 25) ? 25 : (p->cq_entries) & (1ULL << 24) ? 24 : (p->cq_entries) & (1ULL << 23) ? 23 : (p->cq_entries) & (1ULL << 22) ? 22 : (p->cq_entries) & (1ULL << 21) ? 21 : (p->cq_entries) & (1ULL << 20) ? 20 : (p->cq_entries) & (1ULL << 19) ? 19 : (p->cq_entries) & (1ULL << 18) ? 18 : (p->cq_entries) & (1ULL << 17) ? 17 : (p->cq_entries) & (1ULL << 16) ? 16 : (p->cq_entries) & (1ULL << 15) ? 15 : (p->cq_entries) & (1ULL << 14) ? 14 : (p->cq_entries) & (1ULL << 13) ? 13 : (p->cq_entries) & (1ULL << 12) ? 12 : (p->cq_entries) & (1ULL << 11) ? 11 : (p->cq_entries) & (1ULL << 10) ? 10 : (p->cq_entries) & (1ULL << 9) ? 9 : (p->cq_entries) & (1ULL << 8) ? 8 : (p->cq_entries) & (1ULL << 7) ? 7 : (p->cq_entries) & (1ULL << 6) ? 6 : (p->cq_entries) & (1ULL << 5) ? 5 : (p->cq_entries) & (1ULL << 4) ? 4 : (p->cq_entries) & (1ULL << 3) ? 3 : (p->cq_entries) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof(p->cq_entries) <= 4) ? 
__ilog2_u32(p->cq_entries) : __ilog2_u64(p->cq_entries) ); + hash_bits -= 5; + if (hash_bits <= 0) + hash_bits = 1; + ctx->cancel_hash_bits = hash_bits; + ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head), + ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!ctx->cancel_hash) + goto err; + __hash_init(ctx->cancel_hash, 1U << hash_bits); + + if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, + PERCPU_REF_ALLOW_REINIT, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)))) + goto err; + + ctx->flags = p->flags; + do { static struct lock_class_key __key; __init_waitqueue_head((&ctx->sqo_wait), "&ctx->sqo_wait", &__key); } while (0); + do { static struct lock_class_key __key; __init_waitqueue_head((&ctx->cq_wait), "&ctx->cq_wait", &__key); } while (0); + INIT_LIST_HEAD(&ctx->cq_overflow_list); + __init_completion(&ctx->ref_comp); + __init_completion(&ctx->sq_thread_comp); + idr_init(&ctx->io_buffer_idr); + idr_init(&ctx->personality_idr); + do { static struct lock_class_key __key; __mutex_init((&ctx->uring_lock), "&ctx->uring_lock", &__key); } while (0); + do { static struct lock_class_key __key; __init_waitqueue_head((&ctx->wait), "&ctx->wait", &__key); } while (0); + do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&ctx->completion_lock), "&ctx->completion_lock", &__key, LD_WAIT_CONFIG); } while (0); + INIT_LIST_HEAD(&ctx->poll_list); + INIT_LIST_HEAD(&ctx->defer_list); + INIT_LIST_HEAD(&ctx->timeout_list); + do { static struct lock_class_key __key; __init_waitqueue_head((&ctx->inflight_wait), "&ctx->inflight_wait", &__key); } while (0); + do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&ctx->inflight_lock), "&ctx->inflight_lock", &__key, LD_WAIT_CONFIG); } while (0); + INIT_LIST_HEAD(&ctx->inflight_list); + do { do { static struct lock_class_key __key; __init_work(((&(&ctx->file_put_work)->work)), 0); ((&(&ctx->file_put_work)->work))->data = (atomic_long_t) { ((unsigned long)WORK_STRUCT_NO_POOL) }; lockdep_init_map(&((&(&ctx->file_put_work)->work))->lockdep_map, "(work_completion)""(&(&ctx->file_put_work)->work)", &__key, 0); INIT_LIST_HEAD(&((&(&ctx->file_put_work)->work))->entry); ((&(&ctx->file_put_work)->work))->func = (((io_file_put_work))); } while (0); do { static struct lock_class_key __key; init_timer_key((&(&ctx->file_put_work)->timer), (delayed_work_timer_fn), ((0) | 0x00200000), "&(&ctx->file_put_work)->timer", &__key); } while (0); } while (0); + init_llist_head(&ctx->file_put_llist); + return ctx; +err: + if (ctx->fallback_req) + kmem_cache_free(req_cachep, ctx->fallback_req); + kfree(ctx->cancel_hash); + kfree(ctx); + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool __req_need_defer(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + return req->sequence != ctx->cached_cq_tail + + atomic_read(&ctx->cached_cq_overflow); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool req_need_defer(struct io_kiocb *req) +{ + if (__builtin_expect(!!(req->flags & REQ_F_IO_DRAIN), 0)) + return __req_need_defer(req); + + return false; +} + +static void __io_commit_cqring(struct io_ring_ctx *ctx) +{ + struct io_rings *rings = ctx->rings; + + + do { do { extern void __compiletime_assert_1701(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if 
(!((sizeof(*&rings->cq.tail) == sizeof(char) || sizeof(*&rings->cq.tail) == sizeof(short) || sizeof(*&rings->cq.tail) == sizeof(int) || sizeof(*&rings->cq.tail) == sizeof(long)))) __compiletime_assert_1701(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1702(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&rings->cq.tail) == sizeof(char) || sizeof(*&rings->cq.tail) == sizeof(short) || sizeof(*&rings->cq.tail) == sizeof(int) || sizeof(*&rings->cq.tail) == sizeof(long)) || sizeof(*&rings->cq.tail) == sizeof(long long))) __compiletime_assert_1702(); } while (0); do { *(volatile typeof(*&rings->cq.tail) *)&(*&rings->cq.tail) = (ctx->cached_cq_tail); } while (0); } while (0); } while (0); + + if (wq_has_sleeper(&ctx->cq_wait)) { + __wake_up(&ctx->cq_wait, 0x0001, 1, ((void *)0)); + kill_fasync(&ctx->cq_fasync, 29, 1); + } +} + +static void io_req_work_grab_env(struct io_kiocb *req) +{ + const struct io_op_def *def = &io_op_defs[req->opcode]; + + io_req_init_async(req); + + if (!req->work.mm && def->needs_mm) { + mmgrab(get_current()->mm); + req->work.mm = get_current()->mm; + } + if (!req->work.creds) + req->work.creds = (get_cred(({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("fs/io_uring.c", 1112, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); }))); + if (!req->work.fs && def->needs_fs) { + spin_lock(&get_current()->fs->lock); + if (!get_current()->fs->in_exec) { + req->work.fs = get_current()->fs; + req->work.fs->users++; + } else { + req->work.flags |= IO_WQ_WORK_CANCEL; + } + spin_unlock(&get_current()->fs->lock); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void io_req_work_drop_env(struct io_kiocb *req) +{ + if (!(req->flags & REQ_F_WORK_INITIALIZED)) + return; + + if (req->work.mm) { + mmdrop(req->work.mm); + req->work.mm = ((void *)0); + } + if (req->work.creds) { + put_cred(req->work.creds); + req->work.creds = ((void *)0); + } + if (req->work.fs) { + struct fs_struct *fs = req->work.fs; + + spin_lock(&req->work.fs->lock); + if (--fs->users) + fs = ((void *)0); + spin_unlock(&req->work.fs->lock); + if (fs) + free_fs_struct(fs); + } +} + +static void io_prep_async_work(struct io_kiocb *req) +{ + const struct io_op_def *def = &io_op_defs[req->opcode]; + + if (req->flags & REQ_F_ISREG) { + if (def->hash_reg_file) + io_wq_hash_work(&req->work, file_inode(req->file)); + } else { + if (def->unbound_nonreg_file) + req->work.flags |= IO_WQ_WORK_UNBOUND; + } + + io_req_work_grab_env(req); +} + +static void io_prep_async_link(struct io_kiocb *req) +{ + struct io_kiocb *cur; + + io_prep_async_work(req); + if (req->flags & REQ_F_LINK_HEAD) + for (cur = ({ void *__mptr = (void *)((&req->link_list)->next); do { extern void __compiletime_assert_1703(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&req->link_list)->next)), typeof(((typeof(*cur) *)0)->link_list)) && !__builtin_types_compatible_p(typeof(*((&req->link_list)->next)), typeof(void))))) __compiletime_assert_1703(); } while (0); ((typeof(*cur) *)(__mptr - __builtin_offsetof(typeof(*cur), link_list))); }); &cur->link_list != (&req->link_list); cur = ({ void *__mptr = (void 
*)((cur)->link_list.next); do { extern void __compiletime_assert_1704(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((cur)->link_list.next)), typeof(((typeof(*(cur)) *)0)->link_list)) && !__builtin_types_compatible_p(typeof(*((cur)->link_list.next)), typeof(void))))) __compiletime_assert_1704(); } while (0); ((typeof(*(cur)) *)(__mptr - __builtin_offsetof(typeof(*(cur)), link_list))); })) + io_prep_async_work(cur); +} + +static void __io_queue_async_work(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *link = io_prep_linked_timeout(req); + + trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, + &req->work, req->flags); + io_wq_enqueue(ctx->io_wq, &req->work); + + if (link) + io_queue_linked_timeout(link); +} + +static void io_queue_async_work(struct io_kiocb *req) +{ + + io_prep_async_link(req); + __io_queue_async_work(req); +} + +static void io_kill_timeout(struct io_kiocb *req) +{ + int ret; + + ret = hrtimer_try_to_cancel(&req->io->timeout.timer); + if (ret != -1) { + atomic_inc(&req->ctx->cq_timeouts); + list_del_init(&req->list); + req->flags |= REQ_F_COMP_LOCKED; + io_cqring_fill_event(req, 0); + io_put_req(req); + } +} + +static void io_kill_timeouts(struct io_ring_ctx *ctx) +{ + struct io_kiocb *req, *tmp; + + spin_lock_irq(&ctx->completion_lock); + for (req = ({ void *__mptr = (void *)((&ctx->timeout_list)->next); do { extern void __compiletime_assert_1705(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ctx->timeout_list)->next)), typeof(((typeof(*req) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&ctx->timeout_list)->next)), typeof(void))))) __compiletime_assert_1705(); } while (0); ((typeof(*req) *)(__mptr - __builtin_offsetof(typeof(*req), list))); }), tmp = ({ void *__mptr = (void *)((req)->list.next); do { extern void __compiletime_assert_1706(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((req)->list.next)), typeof(((typeof(*(req)) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((req)->list.next)), typeof(void))))) __compiletime_assert_1706(); } while (0); ((typeof(*(req)) *)(__mptr - __builtin_offsetof(typeof(*(req)), list))); }); &req->list != (&ctx->timeout_list); req = tmp, tmp = ({ void *__mptr = (void *)((tmp)->list.next); do { extern void __compiletime_assert_1707(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((tmp)->list.next)), typeof(((typeof(*(tmp)) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((tmp)->list.next)), typeof(void))))) __compiletime_assert_1707(); } while (0); ((typeof(*(tmp)) *)(__mptr - __builtin_offsetof(typeof(*(tmp)), list))); })) + io_kill_timeout(req); + spin_unlock_irq(&ctx->completion_lock); +} + +static void __io_queue_deferred(struct io_ring_ctx *ctx) +{ + do { + struct io_kiocb *req = ({ void *__mptr = (void *)((&ctx->defer_list)->next); do { extern void __compiletime_assert_1708(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ctx->defer_list)->next)), typeof(((struct io_kiocb *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&ctx->defer_list)->next)), typeof(void))))) __compiletime_assert_1708(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, 
list))); }) + ; + + if (req_need_defer(req)) + break; + list_del_init(&req->list); + + __io_queue_async_work(req); + } while (!list_empty(&ctx->defer_list)); +} + +static void io_flush_timeouts(struct io_ring_ctx *ctx) +{ + while (!list_empty(&ctx->timeout_list)) { + struct io_kiocb *req = ({ void *__mptr = (void *)((&ctx->timeout_list)->next); do { extern void __compiletime_assert_1709(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ctx->timeout_list)->next)), typeof(((struct io_kiocb *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&ctx->timeout_list)->next)), typeof(void))))) __compiletime_assert_1709(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, list))); }) + ; + + if (io_is_timeout_noseq(req)) + break; + if (req->timeout.target_seq != ctx->cached_cq_tail + - atomic_read(&ctx->cq_timeouts)) + break; + + list_del_init(&req->list); + io_kill_timeout(req); + } +} + +static void io_commit_cqring(struct io_ring_ctx *ctx) +{ + io_flush_timeouts(ctx); + __io_commit_cqring(ctx); + + if (__builtin_expect(!!(!list_empty(&ctx->defer_list)), 0)) + __io_queue_deferred(ctx); +} + +static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) +{ + struct io_rings *rings = ctx->rings; + unsigned tail; + + tail = ctx->cached_cq_tail; + + + + + + if (tail - ({ do { extern void __compiletime_assert_1710(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(rings->cq.head) == sizeof(char) || sizeof(rings->cq.head) == sizeof(short) || sizeof(rings->cq.head) == sizeof(int) || sizeof(rings->cq.head) == sizeof(long)) || sizeof(rings->cq.head) == sizeof(long long))) __compiletime_assert_1710(); } while (0); ({ typeof( _Generic((rings->cq.head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.head))) __x = (*(const volatile typeof( _Generic((rings->cq.head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.head))) *)&(rings->cq.head)); do { } while (0); (typeof(rings->cq.head))__x; }); }) == rings->cq_ring_entries) + return ((void *)0); + + ctx->cached_cq_tail++; + return &rings->cqes[tail & ctx->cq_mask]; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool io_should_trigger_evfd(struct io_ring_ctx *ctx) +{ + if (!ctx->cq_ev_fd) + return false; + if (({ do { extern void __compiletime_assert_1711(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ctx->rings->cq_flags) == sizeof(char) || sizeof(ctx->rings->cq_flags) == sizeof(short) || sizeof(ctx->rings->cq_flags) == sizeof(int) || sizeof(ctx->rings->cq_flags) == sizeof(long)) || sizeof(ctx->rings->cq_flags) == sizeof(long long))) __compiletime_assert_1711(); } while (0); ({ typeof( _Generic((ctx->rings->cq_flags), char: (char)0, unsigned char: (unsigned 
char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ctx->rings->cq_flags))) __x = (*(const volatile typeof( _Generic((ctx->rings->cq_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ctx->rings->cq_flags))) *)&(ctx->rings->cq_flags)); do { } while (0); (typeof(ctx->rings->cq_flags))__x; }); }) & (1U << 0)) + return false; + if (!ctx->eventfd_async) + return true; + return io_wq_current_is_worker(); +} + +static void io_cqring_ev_posted(struct io_ring_ctx *ctx) +{ + if (waitqueue_active(&ctx->wait)) + __wake_up(&ctx->wait, (0x0001 | 0x0002), 1, ((void *)0)); + if (waitqueue_active(&ctx->sqo_wait)) + __wake_up(&ctx->sqo_wait, (0x0001 | 0x0002), 1, ((void *)0)); + if (io_should_trigger_evfd(ctx)) + eventfd_signal(ctx->cq_ev_fd, 1); +} + + +static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) +{ + struct io_rings *rings = ctx->rings; + struct io_uring_cqe *cqe; + struct io_kiocb *req; + unsigned long flags; + struct list_head list = { &(list), &(list) }; + + if (!force) { + if (list_empty_careful(&ctx->cq_overflow_list)) + return true; + if ((ctx->cached_cq_tail - ({ do { extern void __compiletime_assert_1712(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(rings->cq.head) == sizeof(char) || sizeof(rings->cq.head) == sizeof(short) || sizeof(rings->cq.head) == sizeof(int) || sizeof(rings->cq.head) == sizeof(long)) || sizeof(rings->cq.head) == sizeof(long long))) __compiletime_assert_1712(); } while (0); ({ typeof( _Generic((rings->cq.head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.head))) __x = (*(const volatile typeof( _Generic((rings->cq.head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.head))) *)&(rings->cq.head)); do { } while (0); (typeof(rings->cq.head))__x; }); }) == + rings->cq_ring_entries)) + return false; + } + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->completion_lock)); } while (0); } while (0); + + + if (force) + ctx->cq_overflow_flushed = 1; + + cqe = ((void *)0); + while (!list_empty(&ctx->cq_overflow_list)) { + cqe = io_get_cqring(ctx); + if (!cqe && !force) + break; + + req = ({ void *__mptr = (void *)((&ctx->cq_overflow_list)->next); do { extern void __compiletime_assert_1713(void) 
__attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ctx->cq_overflow_list)->next)), typeof(((struct io_kiocb *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&ctx->cq_overflow_list)->next)), typeof(void))))) __compiletime_assert_1713(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, list))); }) + ; + list_move(&req->list, &list); + req->flags &= ~REQ_F_OVERFLOW; + if (cqe) { + do { do { extern void __compiletime_assert_1714(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(cqe->user_data) == sizeof(char) || sizeof(cqe->user_data) == sizeof(short) || sizeof(cqe->user_data) == sizeof(int) || sizeof(cqe->user_data) == sizeof(long)) || sizeof(cqe->user_data) == sizeof(long long))) __compiletime_assert_1714(); } while (0); do { *(volatile typeof(cqe->user_data) *)&(cqe->user_data) = (req->user_data); } while (0); } while (0); + do { do { extern void __compiletime_assert_1715(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(cqe->res) == sizeof(char) || sizeof(cqe->res) == sizeof(short) || sizeof(cqe->res) == sizeof(int) || sizeof(cqe->res) == sizeof(long)) || sizeof(cqe->res) == sizeof(long long))) __compiletime_assert_1715(); } while (0); do { *(volatile typeof(cqe->res) *)&(cqe->res) = (req->result); } while (0); } while (0); + do { do { extern void __compiletime_assert_1716(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(cqe->flags) == sizeof(char) || sizeof(cqe->flags) == sizeof(short) || sizeof(cqe->flags) == sizeof(int) || sizeof(cqe->flags) == sizeof(long)) || sizeof(cqe->flags) == sizeof(long long))) __compiletime_assert_1716(); } while (0); do { *(volatile typeof(cqe->flags) *)&(cqe->flags) = (req->cflags); } while (0); } while (0); + } else { + do { do { extern void __compiletime_assert_1717(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ctx->rings->cq_overflow) == sizeof(char) || sizeof(ctx->rings->cq_overflow) == sizeof(short) || sizeof(ctx->rings->cq_overflow) == sizeof(int) || sizeof(ctx->rings->cq_overflow) == sizeof(long)) || sizeof(ctx->rings->cq_overflow) == sizeof(long long))) __compiletime_assert_1717(); } while (0); do { *(volatile typeof(ctx->rings->cq_overflow) *)&(ctx->rings->cq_overflow) = (atomic_inc_return(&ctx->cached_cq_overflow)); } while (0); } while (0) + ; + } + } + + io_commit_cqring(ctx); + if (cqe) { + clear_bit(0, &ctx->sq_check_overflow); + clear_bit(0, &ctx->cq_check_overflow); + } + spin_unlock_irqrestore(&ctx->completion_lock, flags); + io_cqring_ev_posted(ctx); + + while (!list_empty(&list)) { + req = ({ void *__mptr = (void *)((&list)->next); do { extern void __compiletime_assert_1718(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&list)->next)), typeof(((struct io_kiocb *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&list)->next)), typeof(void))))) __compiletime_assert_1718(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, list))); }); + list_del(&req->list); + io_put_req(req); + } + + return cqe != ((void *)0); +} + +static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_uring_cqe *cqe; + + trace_io_uring_complete(ctx, req->user_data, res); + + + + + 
+ + cqe = io_get_cqring(ctx); + if (__builtin_expect(!!(cqe), 1)) { + do { do { extern void __compiletime_assert_1719(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(cqe->user_data) == sizeof(char) || sizeof(cqe->user_data) == sizeof(short) || sizeof(cqe->user_data) == sizeof(int) || sizeof(cqe->user_data) == sizeof(long)) || sizeof(cqe->user_data) == sizeof(long long))) __compiletime_assert_1719(); } while (0); do { *(volatile typeof(cqe->user_data) *)&(cqe->user_data) = (req->user_data); } while (0); } while (0); + do { do { extern void __compiletime_assert_1720(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(cqe->res) == sizeof(char) || sizeof(cqe->res) == sizeof(short) || sizeof(cqe->res) == sizeof(int) || sizeof(cqe->res) == sizeof(long)) || sizeof(cqe->res) == sizeof(long long))) __compiletime_assert_1720(); } while (0); do { *(volatile typeof(cqe->res) *)&(cqe->res) = (res); } while (0); } while (0); + do { do { extern void __compiletime_assert_1721(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(cqe->flags) == sizeof(char) || sizeof(cqe->flags) == sizeof(short) || sizeof(cqe->flags) == sizeof(int) || sizeof(cqe->flags) == sizeof(long)) || sizeof(cqe->flags) == sizeof(long long))) __compiletime_assert_1721(); } while (0); do { *(volatile typeof(cqe->flags) *)&(cqe->flags) = (cflags); } while (0); } while (0); + } else if (ctx->cq_overflow_flushed) { + do { do { extern void __compiletime_assert_1722(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ctx->rings->cq_overflow) == sizeof(char) || sizeof(ctx->rings->cq_overflow) == sizeof(short) || sizeof(ctx->rings->cq_overflow) == sizeof(int) || sizeof(ctx->rings->cq_overflow) == sizeof(long)) || sizeof(ctx->rings->cq_overflow) == sizeof(long long))) __compiletime_assert_1722(); } while (0); do { *(volatile typeof(ctx->rings->cq_overflow) *)&(ctx->rings->cq_overflow) = (atomic_inc_return(&ctx->cached_cq_overflow)); } while (0); } while (0) + ; + } else { + if (list_empty(&ctx->cq_overflow_list)) { + set_bit(0, &ctx->sq_check_overflow); + set_bit(0, &ctx->cq_check_overflow); + } + req->flags |= REQ_F_OVERFLOW; + refcount_inc(&req->refs); + req->result = res; + req->cflags = cflags; + list_add_tail(&req->list, &ctx->cq_overflow_list); + } +} + +static void io_cqring_fill_event(struct io_kiocb *req, long res) +{ + __io_cqring_fill_event(req, res, 0); +} + +static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags) +{ + struct io_ring_ctx *ctx = req->ctx; + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->completion_lock)); } while (0); } while (0); + __io_cqring_fill_event(req, res, cflags); + io_commit_cqring(ctx); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + + io_cqring_ev_posted(ctx); +} + +static void io_submit_flush_completions(struct io_comp_state *cs) +{ + struct io_ring_ctx *ctx = cs->ctx; + + spin_lock_irq(&ctx->completion_lock); + while (!list_empty(&cs->list)) { + struct io_kiocb *req; + + req = ({ void *__mptr = (void *)((&cs->list)->next); do { extern void __compiletime_assert_1723(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&cs->list)->next)), typeof(((struct io_kiocb *)0)->list)) && 
!__builtin_types_compatible_p(typeof(*((&cs->list)->next)), typeof(void))))) __compiletime_assert_1723(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, list))); }); + list_del(&req->list); + __io_cqring_fill_event(req, req->result, req->cflags); + if (!(req->flags & REQ_F_LINK_HEAD)) { + req->flags |= REQ_F_COMP_LOCKED; + io_put_req(req); + } else { + spin_unlock_irq(&ctx->completion_lock); + io_put_req(req); + spin_lock_irq(&ctx->completion_lock); + } + } + io_commit_cqring(ctx); + spin_unlock_irq(&ctx->completion_lock); + + io_cqring_ev_posted(ctx); + cs->nr = 0; +} + +static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags, + struct io_comp_state *cs) +{ + if (!cs) { + io_cqring_add_event(req, res, cflags); + io_put_req(req); + } else { + req->result = res; + req->cflags = cflags; + list_add_tail(&req->list, &cs->list); + if (++cs->nr >= 32) + io_submit_flush_completions(cs); + } +} + +static void io_req_complete(struct io_kiocb *req, long res) +{ + __io_req_complete(req, res, 0, ((void *)0)); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool io_is_fallback_req(struct io_kiocb *req) +{ + return req == (struct io_kiocb *) + ((unsigned long) req->ctx->fallback_req & ~1UL); +} + +static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx) +{ + struct io_kiocb *req; + + req = ctx->fallback_req; + if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req)) + return req; + + return ((void *)0); +} + +static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx, + struct io_submit_state *state) +{ + gfp_t gfp = ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)) | (( gfp_t)0x2000u); + struct io_kiocb *req; + + if (!state->free_reqs) { + size_t sz; + int ret; + + sz = __builtin_choose_expr(((!!(sizeof((typeof((size_t)(state->ios_left)) *)1 == (typeof((size_t)((sizeof(state->reqs) / sizeof((state->reqs)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((state->reqs)), typeof(&(state->reqs)[0])))); })))))) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((size_t)(state->ios_left)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((size_t)((sizeof(state->reqs) / sizeof((state->reqs)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((state->reqs)), typeof(&(state->reqs)[0])))); })))))) * 0l)) : (int *)8))))), (((size_t)(state->ios_left)) < ((size_t)((sizeof(state->reqs) / sizeof((state->reqs)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((state->reqs)), typeof(&(state->reqs)[0])))); })))))) ? ((size_t)(state->ios_left)) : ((size_t)((sizeof(state->reqs) / sizeof((state->reqs)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((state->reqs)), typeof(&(state->reqs)[0])))); }))))))), ({ typeof((size_t)(state->ios_left)) __UNIQUE_ID___x1724 = ((size_t)(state->ios_left)); typeof((size_t)((sizeof(state->reqs) / sizeof((state->reqs)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((state->reqs)), typeof(&(state->reqs)[0])))); })))))) __UNIQUE_ID___y1725 = ((size_t)((sizeof(state->reqs) / sizeof((state->reqs)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((state->reqs)), typeof(&(state->reqs)[0])))); })))))); ((__UNIQUE_ID___x1724) < (__UNIQUE_ID___y1725) ? 
(__UNIQUE_ID___x1724) : (__UNIQUE_ID___y1725)); })); + ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs); + + + + + + if (__builtin_expect(!!(ret <= 0), 0)) { + state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); + if (!state->reqs[0]) + goto fallback; + ret = 1; + } + state->free_reqs = ret - 1; + req = state->reqs[ret - 1]; + } else { + state->free_reqs--; + req = state->reqs[state->free_reqs]; + } + + return req; +fallback: + return io_get_fallback_req(ctx); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void io_put_file(struct io_kiocb *req, struct file *file, + bool fixed) +{ + if (fixed) + percpu_ref_put(req->fixed_file_refs); + else + fput(file); +} + +static void io_dismantle_req(struct io_kiocb *req) +{ + if (req->flags & REQ_F_NEED_CLEANUP) + io_cleanup_req(req); + + kfree(req->io); + if (req->file) + io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); + __io_put_req_task(req); + io_req_work_drop_env(req); + + if (req->flags & REQ_F_INFLIGHT) { + struct io_ring_ctx *ctx = req->ctx; + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->inflight_lock)); } while (0); } while (0); + list_del(&req->inflight_entry); + if (waitqueue_active(&ctx->inflight_wait)) + __wake_up(&ctx->inflight_wait, (0x0001 | 0x0002), 1, ((void *)0)); + spin_unlock_irqrestore(&ctx->inflight_lock, flags); + } +} + +static void __io_free_req(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx; + + io_dismantle_req(req); + ctx = req->ctx; + if (__builtin_expect(!!(!io_is_fallback_req(req)), 1)) + kmem_cache_free(req_cachep, req); + else + clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req); + percpu_ref_put(&ctx->refs); +} + +static bool io_link_cancel_timeout(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + ret = hrtimer_try_to_cancel(&req->io->timeout.timer); + if (ret != -1) { + io_cqring_fill_event(req, -125); + io_commit_cqring(ctx); + req->flags &= ~REQ_F_LINK_HEAD; + io_put_req(req); + return true; + } + + return false; +} + +static bool __io_kill_linked_timeout(struct io_kiocb *req) +{ + struct io_kiocb *link; + bool wake_ev; + + if (list_empty(&req->link_list)) + return false; + link = ({ void *__mptr = (void *)((&req->link_list)->next); do { extern void __compiletime_assert_1726(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&req->link_list)->next)), typeof(((struct io_kiocb *)0)->link_list)) && !__builtin_types_compatible_p(typeof(*((&req->link_list)->next)), typeof(void))))) __compiletime_assert_1726(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, link_list))); }); + if (link->opcode != IORING_OP_LINK_TIMEOUT) + return false; + + list_del_init(&link->link_list); + wake_ev = io_link_cancel_timeout(link); + req->flags &= ~REQ_F_LINK_TIMEOUT; + return wake_ev; +} + +static void io_kill_linked_timeout(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + bool wake_ev; + + if (!(req->flags & REQ_F_COMP_LOCKED)) { + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->completion_lock)); } while (0); } while (0); + wake_ev = __io_kill_linked_timeout(req); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else 
{ + wake_ev = __io_kill_linked_timeout(req); + } + + if (wake_ev) + io_cqring_ev_posted(ctx); +} + +static struct io_kiocb *io_req_link_next(struct io_kiocb *req) +{ + struct io_kiocb *nxt; + + + + + + + if (__builtin_expect(!!(list_empty(&req->link_list)), 0)) + return ((void *)0); + + nxt = ({ void *__mptr = (void *)((&req->link_list)->next); do { extern void __compiletime_assert_1727(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&req->link_list)->next)), typeof(((struct io_kiocb *)0)->link_list)) && !__builtin_types_compatible_p(typeof(*((&req->link_list)->next)), typeof(void))))) __compiletime_assert_1727(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, link_list))); }); + list_del_init(&req->link_list); + if (!list_empty(&nxt->link_list)) + nxt->flags |= REQ_F_LINK_HEAD; + return nxt; +} + + + + +static void __io_fail_links(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + while (!list_empty(&req->link_list)) { + struct io_kiocb *link = ({ void *__mptr = (void *)((&req->link_list)->next); do { extern void __compiletime_assert_1728(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&req->link_list)->next)), typeof(((struct io_kiocb *)0)->link_list)) && !__builtin_types_compatible_p(typeof(*((&req->link_list)->next)), typeof(void))))) __compiletime_assert_1728(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, link_list))); }) + ; + + list_del_init(&link->link_list); + trace_io_uring_fail_link(req, link); + + io_cqring_fill_event(link, -125); + __io_double_put_req(link); + req->flags &= ~REQ_F_LINK_TIMEOUT; + } + + io_commit_cqring(ctx); + io_cqring_ev_posted(ctx); +} + +static void io_fail_links(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!(req->flags & REQ_F_COMP_LOCKED)) { + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->completion_lock)); } while (0); } while (0); + __io_fail_links(req); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else { + __io_fail_links(req); + } + + io_cqring_ev_posted(ctx); +} + +static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) +{ + req->flags &= ~REQ_F_LINK_HEAD; + if (req->flags & REQ_F_LINK_TIMEOUT) + io_kill_linked_timeout(req); + + + + + + + + if (__builtin_expect(!!(!(req->flags & REQ_F_FAIL_LINK)), 1)) + return io_req_link_next(req); + io_fail_links(req); + return ((void *)0); +} + +static struct io_kiocb *io_req_find_next(struct io_kiocb *req) +{ + if (__builtin_expect(!!(!(req->flags & REQ_F_LINK_HEAD)), 1)) + return ((void *)0); + return __io_req_find_next(req); +} + +static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) +{ + struct task_struct *tsk = req->task; + struct io_ring_ctx *ctx = req->ctx; + int ret, notify = 1; + + + + + + + + if (ctx->flags & (1U << 1)) + notify = 0; + else if (ctx->cq_ev_fd) + notify = 2; + + ret = task_work_add(tsk, cb, notify); + if (!ret) + wake_up_process(tsk); + return ret; +} + +static void __io_req_task_cancel(struct io_kiocb *req, int error) +{ + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->completion_lock); + io_cqring_fill_event(req, error); + io_commit_cqring(ctx); + spin_unlock_irq(&ctx->completion_lock); + + io_cqring_ev_posted(ctx); + 
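+ /* Mark the request as failed so any linked requests get cancelled as
+  * well, then drop both references still held on it. */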
req_set_fail_links(req); + io_double_put_req(req); +} + +static void io_req_task_cancel(struct callback_head *cb) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(cb); do { extern void __compiletime_assert_1729(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(cb)), typeof(((struct io_kiocb *)0)->task_work)) && !__builtin_types_compatible_p(typeof(*(cb)), typeof(void))))) __compiletime_assert_1729(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, task_work))); }); + + __io_req_task_cancel(req, -125); +} + +static void __io_req_task_submit(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!__io_sq_thread_acquire_mm(ctx)) { + mutex_lock_nested(&ctx->uring_lock, 0); + __io_queue_sqe(req, ((void *)0), ((void *)0)); + mutex_unlock(&ctx->uring_lock); + } else { + __io_req_task_cancel(req, -14); + } +} + +static void io_req_task_submit(struct callback_head *cb) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(cb); do { extern void __compiletime_assert_1730(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(cb)), typeof(((struct io_kiocb *)0)->task_work)) && !__builtin_types_compatible_p(typeof(*(cb)), typeof(void))))) __compiletime_assert_1730(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, task_work))); }); + + __io_req_task_submit(req); +} + +static void io_req_task_queue(struct io_kiocb *req) +{ + int ret; + + init_task_work(&req->task_work, io_req_task_submit); + + ret = io_req_task_work_add(req, &req->task_work); + if (__builtin_expect(!!(ret), 0)) { + struct task_struct *tsk; + + init_task_work(&req->task_work, io_req_task_cancel); + tsk = io_wq_get_task(req->ctx->io_wq); + task_work_add(tsk, &req->task_work, 0); + wake_up_process(tsk); + } +} + +static void io_queue_next(struct io_kiocb *req) +{ + struct io_kiocb *nxt = io_req_find_next(req); + + if (nxt) + io_req_task_queue(nxt); +} + +static void io_free_req(struct io_kiocb *req) +{ + io_queue_next(req); + __io_free_req(req); +} + +struct req_batch { + void *reqs[8]; + int to_free; +}; + +static void __io_req_free_batch_flush(struct io_ring_ctx *ctx, + struct req_batch *rb) +{ + kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs); + percpu_ref_put_many(&ctx->refs, rb->to_free); + rb->to_free = 0; +} + +static void io_req_free_batch_finish(struct io_ring_ctx *ctx, + struct req_batch *rb) +{ + if (rb->to_free) + __io_req_free_batch_flush(ctx, rb); +} + +static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req) +{ + if (__builtin_expect(!!(io_is_fallback_req(req)), 0)) { + io_free_req(req); + return; + } + if (req->flags & REQ_F_LINK_HEAD) + io_queue_next(req); + + io_dismantle_req(req); + rb->reqs[rb->to_free++] = req; + if (__builtin_expect(!!(rb->to_free == (sizeof(rb->reqs) / sizeof((rb->reqs)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((rb->reqs)), typeof(&(rb->reqs)[0])))); }))))), 0)) + __io_req_free_batch_flush(req->ctx, rb); +} + + + + + +static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) +{ + struct io_kiocb *nxt = ((void *)0); + + if (refcount_dec_and_test(&req->refs)) { + nxt = io_req_find_next(req); + __io_free_req(req); + } + return nxt; +} + +static void io_put_req(struct io_kiocb *req) +{ + if (refcount_dec_and_test(&req->refs)) + io_free_req(req); +} + +static struct io_wq_work *io_steal_work(struct io_kiocb *req) +{ 
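+ /* Presumably invoked from the io-wq worker that owns a reference: it is
+  * only safe to hand the linked next request's work back to the caller
+  * when ours is the last remaining reference. */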
+ struct io_kiocb *nxt; + + + + + + + if (refcount_read(&req->refs) != 1) + return ((void *)0); + + nxt = io_req_find_next(req); + return nxt ? &nxt->work : ((void *)0); +} + + + + + +static void __io_double_put_req(struct io_kiocb *req) +{ + + if (refcount_sub_and_test(2, &req->refs)) + __io_free_req(req); +} + +static void io_double_put_req(struct io_kiocb *req) +{ + + if (refcount_sub_and_test(2, &req->refs)) + io_free_req(req); +} + +static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush) +{ + struct io_rings *rings = ctx->rings; + + if (test_bit(0, &ctx->cq_check_overflow)) { + + + + + + if (noflush && !list_empty(&ctx->cq_overflow_list)) + return -1U; + + io_cqring_overflow_flush(ctx, false); + } + + + __asm__ __volatile__("": : :"memory"); + return ctx->cached_cq_tail - ({ do { extern void __compiletime_assert_1731(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(rings->cq.head) == sizeof(char) || sizeof(rings->cq.head) == sizeof(short) || sizeof(rings->cq.head) == sizeof(int) || sizeof(rings->cq.head) == sizeof(long)) || sizeof(rings->cq.head) == sizeof(long long))) __compiletime_assert_1731(); } while (0); ({ typeof( _Generic((rings->cq.head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.head))) __x = (*(const volatile typeof( _Generic((rings->cq.head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.head))) *)&(rings->cq.head)); do { } while (0); (typeof(rings->cq.head))__x; }); }); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) unsigned int io_sqring_entries(struct io_ring_ctx *ctx) +{ + struct io_rings *rings = ctx->rings; + + + return ({ typeof(*&rings->sq.tail) ___p1 = ({ do { extern void __compiletime_assert_1732(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&rings->sq.tail) == sizeof(char) || sizeof(*&rings->sq.tail) == sizeof(short) || sizeof(*&rings->sq.tail) == sizeof(int) || sizeof(*&rings->sq.tail) == sizeof(long)) || sizeof(*&rings->sq.tail) == sizeof(long long))) __compiletime_assert_1732(); } while (0); ({ typeof( _Generic((*&rings->sq.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (*&rings->sq.tail))) __x = (*(const volatile typeof( _Generic((*&rings->sq.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: 
(unsigned long long)0, signed long long: (signed long long)0, default: (*&rings->sq.tail))) *)&(*&rings->sq.tail)); do { } while (0); (typeof(*&rings->sq.tail))__x; }); }); do { extern void __compiletime_assert_1733(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&rings->sq.tail) == sizeof(char) || sizeof(*&rings->sq.tail) == sizeof(short) || sizeof(*&rings->sq.tail) == sizeof(int) || sizeof(*&rings->sq.tail) == sizeof(long)))) __compiletime_assert_1733(); } while (0); __asm__ __volatile__("": : :"memory"); ___p1; }) - ctx->cached_sq_head; +} + +static int io_put_kbuf(struct io_kiocb *req) +{ + struct io_buffer *kbuf; + int cflags; + + kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; + cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT; + cflags |= (1U << 0); + req->rw.addr = 0; + kfree(kbuf); + return cflags; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool io_run_task_work(void) +{ + if (get_current()->task_works) { + do { ({ int __ret_warn_on = !!(((0x0000) & (0x0004 | 0x0008 | 0x0040 | 0x0080))); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1734)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("fs/io_uring.c"), "i" (1920), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1735)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1736)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); get_current()->task_state_change = ({ __label__ __here; __here: (unsigned long)&&__here; }); get_current()->state = (0x0000); } while (0); + task_work_run(); + return true; + } + + return false; +} + +static void io_iopoll_queue(struct list_head *again) +{ + struct io_kiocb *req; + + do { + req = ({ void *__mptr = (void *)((again)->next); do { extern void __compiletime_assert_1737(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((again)->next)), typeof(((struct io_kiocb *)0)->list)) && !__builtin_types_compatible_p(typeof(*((again)->next)), typeof(void))))) __compiletime_assert_1737(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, list))); }); + list_del(&req->list); + if (!io_rw_reissue(req, -11)) + io_complete_rw_common(&req->rw.kiocb, -11, ((void *)0)); + } while (!list_empty(again)); +} + + + + +static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, + struct list_head *done) +{ + struct req_batch rb; + struct io_kiocb *req; + struct list_head again = { &(again), &(again) }; + + + __asm__ __volatile__("": : :"memory"); + + rb.to_free = 0; + while (!list_empty(done)) { + int cflags = 0; + + req = ({ void *__mptr = (void *)((done)->next); do { extern void __compiletime_assert_1738(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((done)->next)), 
typeof(((struct io_kiocb *)0)->list)) && !__builtin_types_compatible_p(typeof(*((done)->next)), typeof(void))))) __compiletime_assert_1738(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, list))); }); + if (({ do { extern void __compiletime_assert_1739(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(req->result) == sizeof(char) || sizeof(req->result) == sizeof(short) || sizeof(req->result) == sizeof(int) || sizeof(req->result) == sizeof(long)) || sizeof(req->result) == sizeof(long long))) __compiletime_assert_1739(); } while (0); ({ typeof( _Generic((req->result), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (req->result))) __x = (*(const volatile typeof( _Generic((req->result), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (req->result))) *)&(req->result)); do { } while (0); (typeof(req->result))__x; }); }) == -11) { + req->iopoll_completed = 0; + list_move_tail(&req->list, &again); + continue; + } + list_del(&req->list); + + if (req->flags & REQ_F_BUFFER_SELECTED) + cflags = io_put_kbuf(req); + + __io_cqring_fill_event(req, req->result, cflags); + (*nr_events)++; + + if (refcount_dec_and_test(&req->refs)) + io_req_free_batch(&rb, req); + } + + io_commit_cqring(ctx); + if (ctx->flags & (1U << 1)) + io_cqring_ev_posted(ctx); + io_req_free_batch_finish(ctx, &rb); + + if (!list_empty(&again)) + io_iopoll_queue(&again); +} + +static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, + long min) +{ + struct io_kiocb *req, *tmp; + struct list_head done = { &(done), &(done) }; + bool spin; + int ret; + + + + + + spin = !ctx->poll_multi_file && *nr_events < min; + + ret = 0; + for (req = ({ void *__mptr = (void *)((&ctx->poll_list)->next); do { extern void __compiletime_assert_1740(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ctx->poll_list)->next)), typeof(((typeof(*req) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&ctx->poll_list)->next)), typeof(void))))) __compiletime_assert_1740(); } while (0); ((typeof(*req) *)(__mptr - __builtin_offsetof(typeof(*req), list))); }), tmp = ({ void *__mptr = (void *)((req)->list.next); do { extern void __compiletime_assert_1741(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((req)->list.next)), typeof(((typeof(*(req)) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((req)->list.next)), typeof(void))))) __compiletime_assert_1741(); } while (0); ((typeof(*(req)) *)(__mptr - __builtin_offsetof(typeof(*(req)), list))); }); &req->list != (&ctx->poll_list); req = tmp, tmp = ({ void *__mptr = (void *)((tmp)->list.next); do { extern void __compiletime_assert_1742(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if 
(!(!(!__builtin_types_compatible_p(typeof(*((tmp)->list.next)), typeof(((typeof(*(tmp)) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((tmp)->list.next)), typeof(void))))) __compiletime_assert_1742(); } while (0); ((typeof(*(tmp)) *)(__mptr - __builtin_offsetof(typeof(*(tmp)), list))); })) { + struct kiocb *kiocb = &req->rw.kiocb; + + + + + + + if (({ do { extern void __compiletime_assert_1743(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(req->iopoll_completed) == sizeof(char) || sizeof(req->iopoll_completed) == sizeof(short) || sizeof(req->iopoll_completed) == sizeof(int) || sizeof(req->iopoll_completed) == sizeof(long)) || sizeof(req->iopoll_completed) == sizeof(long long))) __compiletime_assert_1743(); } while (0); ({ typeof( _Generic((req->iopoll_completed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (req->iopoll_completed))) __x = (*(const volatile typeof( _Generic((req->iopoll_completed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (req->iopoll_completed))) *)&(req->iopoll_completed)); do { } while (0); (typeof(req->iopoll_completed))__x; }); })) { + list_move_tail(&req->list, &done); + continue; + } + if (!list_empty(&done)) + break; + + ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin); + if (ret < 0) + break; + + + if (({ do { extern void __compiletime_assert_1744(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(req->iopoll_completed) == sizeof(char) || sizeof(req->iopoll_completed) == sizeof(short) || sizeof(req->iopoll_completed) == sizeof(int) || sizeof(req->iopoll_completed) == sizeof(long)) || sizeof(req->iopoll_completed) == sizeof(long long))) __compiletime_assert_1744(); } while (0); ({ typeof( _Generic((req->iopoll_completed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (req->iopoll_completed))) __x = (*(const volatile typeof( _Generic((req->iopoll_completed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (req->iopoll_completed))) *)&(req->iopoll_completed)); do { } while (0); (typeof(req->iopoll_completed))__x; }); })) + list_move_tail(&req->list, &done); + + if (ret && spin) + spin = false; + ret = 0; + } + + if (!list_empty(&done)) + io_iopoll_complete(ctx, nr_events, &done); + + return ret; +} + + + + + + +static int io_iopoll_getevents(struct 
io_ring_ctx *ctx, unsigned int *nr_events, + long min) +{ + while (!list_empty(&ctx->poll_list) && !need_resched()) { + int ret; + + ret = io_do_iopoll(ctx, nr_events, min); + if (ret < 0) + return ret; + if (*nr_events >= min) + return 0; + } + + return 1; +} + + + + + +static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) +{ + if (!(ctx->flags & (1U << 0))) + return; + + mutex_lock_nested(&ctx->uring_lock, 0); + while (!list_empty(&ctx->poll_list)) { + unsigned int nr_events = 0; + + io_do_iopoll(ctx, &nr_events, 0); + + + if (nr_events == 0) + break; + + + + + + if (need_resched()) { + mutex_unlock(&ctx->uring_lock); + ({ ___might_sleep("fs/io_uring.c", 2079, 0); _cond_resched(); }); + mutex_lock_nested(&ctx->uring_lock, 0); + } + } + mutex_unlock(&ctx->uring_lock); +} + +static int io_iopoll_check(struct io_ring_ctx *ctx, long min) +{ + unsigned int nr_events = 0; + int iters = 0, ret = 0; + + + + + + + mutex_lock_nested(&ctx->uring_lock, 0); + do { + + + + + + if (io_cqring_events(ctx, false)) + break; +# 2116 "fs/io_uring.c" + if (!(++iters & 7)) { + mutex_unlock(&ctx->uring_lock); + io_run_task_work(); + mutex_lock_nested(&ctx->uring_lock, 0); + } + + ret = io_iopoll_getevents(ctx, &nr_events, min); + if (ret <= 0) + break; + ret = 0; + } while (min && !nr_events && !need_resched()); + + mutex_unlock(&ctx->uring_lock); + return ret; +} + +static void kiocb_end_write(struct io_kiocb *req) +{ + + + + + if (req->flags & REQ_F_ISREG) { + struct inode *inode = file_inode(req->file); + + percpu_rwsem_acquire(&(inode->i_sb)->s_writers.rw_sem[(SB_FREEZE_WRITE)-1], 1, ({ __label__ __here; __here: (unsigned long)&&__here; })); + } + file_end_write(req->file); +} + +static void io_complete_rw_common(struct kiocb *kiocb, long res, + struct io_comp_state *cs) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(kiocb); do { extern void __compiletime_assert_1745(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(kiocb)), typeof(((struct io_kiocb *)0)->rw.kiocb)) && !__builtin_types_compatible_p(typeof(*(kiocb)), typeof(void))))) __compiletime_assert_1745(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, rw.kiocb))); }); + int cflags = 0; + + if (kiocb->ki_flags & (1 << 6)) + kiocb_end_write(req); + + if (res != req->result) + req_set_fail_links(req); + if (req->flags & REQ_F_BUFFER_SELECTED) + cflags = io_put_kbuf(req); + __io_req_complete(req, res, cflags, cs); +} + + +static bool io_resubmit_prep(struct io_kiocb *req, int error) +{ + struct iovec inline_vecs[8], *iovec = inline_vecs; + ssize_t ret = -125; + struct iov_iter iter; + int rw; + + if (error) { + ret = error; + goto end_req; + } + + switch (req->opcode) { + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_READ: + rw = 0; + break; + case IORING_OP_WRITEV: + case IORING_OP_WRITE_FIXED: + case IORING_OP_WRITE: + rw = 1; + break; + default: + ({ static bool __attribute__((__section__(".data.once"))) __print_once; bool __ret_print_once = !__print_once; if (!__print_once) { __print_once = true; printk("\001" "4" "io_uring: bad opcode in resubmit %d\n", req->opcode); } __builtin_expect(!!(__ret_print_once), 0); }) + ; + goto end_req; + } + + ret = io_import_iovec(rw, req, &iovec, &iter, false); + if (ret < 0) + goto end_req; + ret = io_setup_async_rw(req, ret, iovec, inline_vecs, &iter); + if (!ret) + return true; + kfree(iovec); +end_req: + req_set_fail_links(req); + io_req_complete(req, ret); + return 
false; +} + +static void io_rw_resubmit(struct callback_head *cb) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(cb); do { extern void __compiletime_assert_1746(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(cb)), typeof(((struct io_kiocb *)0)->task_work)) && !__builtin_types_compatible_p(typeof(*(cb)), typeof(void))))) __compiletime_assert_1746(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, task_work))); }); + struct io_ring_ctx *ctx = req->ctx; + int err; + + err = io_sq_thread_acquire_mm(ctx, req); + + if (io_resubmit_prep(req, err)) { + refcount_inc(&req->refs); + io_queue_async_work(req); + } +} + + +static bool io_rw_reissue(struct io_kiocb *req, long res) +{ + + int ret; + + if ((res != -11 && res != -95) || io_wq_current_is_worker()) + return false; + + init_task_work(&req->task_work, io_rw_resubmit); + ret = io_req_task_work_add(req, &req->task_work); + if (!ret) + return true; + + return false; +} + +static void __io_complete_rw(struct io_kiocb *req, long res, long res2, + struct io_comp_state *cs) +{ + if (!io_rw_reissue(req, res)) + io_complete_rw_common(&req->rw.kiocb, res, cs); +} + +static void io_complete_rw(struct kiocb *kiocb, long res, long res2) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(kiocb); do { extern void __compiletime_assert_1747(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(kiocb)), typeof(((struct io_kiocb *)0)->rw.kiocb)) && !__builtin_types_compatible_p(typeof(*(kiocb)), typeof(void))))) __compiletime_assert_1747(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, rw.kiocb))); }); + + __io_complete_rw(req, res, res2, ((void *)0)); +} + +static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(kiocb); do { extern void __compiletime_assert_1748(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(kiocb)), typeof(((struct io_kiocb *)0)->rw.kiocb)) && !__builtin_types_compatible_p(typeof(*(kiocb)), typeof(void))))) __compiletime_assert_1748(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, rw.kiocb))); }); + + if (kiocb->ki_flags & (1 << 6)) + kiocb_end_write(req); + + if (res != -11 && res != req->result) + req_set_fail_links(req); + + do { do { extern void __compiletime_assert_1749(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(req->result) == sizeof(char) || sizeof(req->result) == sizeof(short) || sizeof(req->result) == sizeof(int) || sizeof(req->result) == sizeof(long)) || sizeof(req->result) == sizeof(long long))) __compiletime_assert_1749(); } while (0); do { *(volatile typeof(req->result) *)&(req->result) = (res); } while (0); } while (0); + + __asm__ __volatile__("": : :"memory"); + do { do { extern void __compiletime_assert_1750(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(req->iopoll_completed) == sizeof(char) || sizeof(req->iopoll_completed) == sizeof(short) || sizeof(req->iopoll_completed) == sizeof(int) || sizeof(req->iopoll_completed) == sizeof(long)) || sizeof(req->iopoll_completed) == sizeof(long long))) __compiletime_assert_1750(); } while (0); do { *(volatile typeof(req->iopoll_completed) *)&(req->iopoll_completed) = (1); } while 
(0); } while (0); +} + + + + + + + +static void io_iopoll_req_issued(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + + + + + + if (list_empty(&ctx->poll_list)) { + ctx->poll_multi_file = false; + } else if (!ctx->poll_multi_file) { + struct io_kiocb *list_req; + + list_req = ({ void *__mptr = (void *)((&ctx->poll_list)->next); do { extern void __compiletime_assert_1751(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ctx->poll_list)->next)), typeof(((struct io_kiocb *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&ctx->poll_list)->next)), typeof(void))))) __compiletime_assert_1751(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, list))); }) + ; + if (list_req->file != req->file) + ctx->poll_multi_file = true; + } + + + + + + if (({ do { extern void __compiletime_assert_1752(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(req->iopoll_completed) == sizeof(char) || sizeof(req->iopoll_completed) == sizeof(short) || sizeof(req->iopoll_completed) == sizeof(int) || sizeof(req->iopoll_completed) == sizeof(long)) || sizeof(req->iopoll_completed) == sizeof(long long))) __compiletime_assert_1752(); } while (0); ({ typeof( _Generic((req->iopoll_completed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (req->iopoll_completed))) __x = (*(const volatile typeof( _Generic((req->iopoll_completed), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (req->iopoll_completed))) *)&(req->iopoll_completed)); do { } while (0); (typeof(req->iopoll_completed))__x; }); })) + list_add(&req->list, &ctx->poll_list); + else + list_add_tail(&req->list, &ctx->poll_list); + + if ((ctx->flags & (1U << 1)) && + wq_has_sleeper(&ctx->sqo_wait)) + __wake_up(&ctx->sqo_wait, (0x0001 | 0x0002), 1, ((void *)0)); +} + +static void __io_state_file_put(struct io_submit_state *state) +{ + int diff = state->has_refs - state->used_refs; + + if (diff) + fput_many(state->file, diff); + state->file = ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void io_state_file_put(struct io_submit_state *state) +{ + if (state->file) + __io_state_file_put(state); +} + + + + + + +static struct file *__io_file_get(struct io_submit_state *state, int fd) +{ + if (!state) + return fget(fd); + + if (state->file) { + if (state->fd == fd) { + state->used_refs++; + state->ios_left--; + return state->file; + } + __io_state_file_put(state); + } + state->file = fget_many(fd, state->ios_left); + if (!state->file) + return ((void *)0); + + state->fd = fd; + state->has_refs = state->ios_left; + state->used_refs = 1; + state->ios_left--; + return state->file; +} + +static bool io_bdev_nowait(struct block_device *bdev) +{ + + return !bdev || queue_is_mq(bdev_get_queue(bdev)); + + + +} + + + + + + 
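+/* Decides whether a file can be driven without blocking. The octal masks
+ * below are the preprocessed forms of S_IFMT (00170000), S_IFBLK (0060000),
+ * S_IFCHR (0020000), S_IFSOCK (0140000) and S_IFREG (0100000); 00004000 is
+ * O_NONBLOCK and the 0x8000000 fmode bit is FMODE_NOWAIT. */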
+static bool io_file_supports_async(struct file *file, int rw) +{ + umode_t mode = file_inode(file)->i_mode; + + if ((((mode) & 00170000) == 0060000)) { + if (io_bdev_nowait(file->f_inode->i_bdev)) + return true; + return false; + } + if ((((mode) & 00170000) == 0020000) || (((mode) & 00170000) == 0140000)) + return true; + if ((((mode) & 00170000) == 0100000)) { + if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) && + file->f_op != &io_uring_fops) + return true; + return false; + } + + + if (file->f_flags & 00004000) + return true; + + if (!(file->f_mode & (( fmode_t)0x8000000))) + return false; + + if (rw == 0) + return file->f_op->read_iter != ((void *)0); + + return file->f_op->write_iter != ((void *)0); +} + +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, + bool force_nonblock) +{ + struct io_ring_ctx *ctx = req->ctx; + struct kiocb *kiocb = &req->rw.kiocb; + unsigned ioprio; + int ret; + + if ((((file_inode(req->file)->i_mode) & 00170000) == 0100000)) + req->flags |= REQ_F_ISREG; + + kiocb->ki_pos = ({ do { extern void __compiletime_assert_1753(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->off) == sizeof(char) || sizeof(sqe->off) == sizeof(short) || sizeof(sqe->off) == sizeof(int) || sizeof(sqe->off) == sizeof(long)) || sizeof(sqe->off) == sizeof(long long))) __compiletime_assert_1753(); } while (0); ({ typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) __x = (*(const volatile typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) *)&(sqe->off)); do { } while (0); (typeof(sqe->off))__x; }); }); + if (kiocb->ki_pos == -1 && !(req->file->f_mode & (( fmode_t)0x200000))) { + req->flags |= REQ_F_CUR_POS; + kiocb->ki_pos = req->file->f_pos; + } + kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); + kiocb->ki_flags = iocb_flags(kiocb->ki_filp); + ret = kiocb_set_rw_flags(kiocb, ({ do { extern void __compiletime_assert_1754(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->rw_flags) == sizeof(char) || sizeof(sqe->rw_flags) == sizeof(short) || sizeof(sqe->rw_flags) == sizeof(int) || sizeof(sqe->rw_flags) == sizeof(long)) || sizeof(sqe->rw_flags) == sizeof(long long))) __compiletime_assert_1754(); } while (0); ({ typeof( _Generic((sqe->rw_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->rw_flags))) __x = (*(const volatile typeof( _Generic((sqe->rw_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: 
(unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->rw_flags))) *)&(sqe->rw_flags)); do { } while (0); (typeof(sqe->rw_flags))__x; }); })); + if (__builtin_expect(!!(ret), 0)) + return ret; + + ioprio = ({ do { extern void __compiletime_assert_1755(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->ioprio) == sizeof(char) || sizeof(sqe->ioprio) == sizeof(short) || sizeof(sqe->ioprio) == sizeof(int) || sizeof(sqe->ioprio) == sizeof(long)) || sizeof(sqe->ioprio) == sizeof(long long))) __compiletime_assert_1755(); } while (0); ({ typeof( _Generic((sqe->ioprio), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->ioprio))) __x = (*(const volatile typeof( _Generic((sqe->ioprio), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->ioprio))) *)&(sqe->ioprio)); do { } while (0); (typeof(sqe->ioprio))__x; }); }); + if (ioprio) { + ret = ioprio_check_cap(ioprio); + if (ret) + return ret; + + kiocb->ki_ioprio = ioprio; + } else + kiocb->ki_ioprio = get_current_ioprio(); + + + if (kiocb->ki_flags & (1 << 7)) + req->flags |= REQ_F_NOWAIT; + + if (kiocb->ki_flags & (1 << 2)) + io_get_req_task(req); + + if (force_nonblock) + kiocb->ki_flags |= (1 << 7); + + if (ctx->flags & (1U << 0)) { + if (!(kiocb->ki_flags & (1 << 2)) || + !kiocb->ki_filp->f_op->iopoll) + return -95; + + kiocb->ki_flags |= (1 << 3); + kiocb->ki_complete = io_complete_rw_iopoll; + req->iopoll_completed = 0; + io_get_req_task(req); + } else { + if (kiocb->ki_flags & (1 << 3)) + return -22; + kiocb->ki_complete = io_complete_rw; + } + + req->rw.addr = ({ do { extern void __compiletime_assert_1756(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1756(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed 
long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }); + req->rw.len = ({ do { extern void __compiletime_assert_1757(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->len) == sizeof(char) || sizeof(sqe->len) == sizeof(short) || sizeof(sqe->len) == sizeof(int) || sizeof(sqe->len) == sizeof(long)) || sizeof(sqe->len) == sizeof(long long))) __compiletime_assert_1757(); } while (0); ({ typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) __x = (*(const volatile typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) *)&(sqe->len)); do { } while (0); (typeof(sqe->len))__x; }); }); + req->buf_index = ({ do { extern void __compiletime_assert_1758(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->buf_index) == sizeof(char) || sizeof(sqe->buf_index) == sizeof(short) || sizeof(sqe->buf_index) == sizeof(int) || sizeof(sqe->buf_index) == sizeof(long)) || sizeof(sqe->buf_index) == sizeof(long long))) __compiletime_assert_1758(); } while (0); ({ typeof( _Generic((sqe->buf_index), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->buf_index))) __x = (*(const volatile typeof( _Generic((sqe->buf_index), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->buf_index))) *)&(sqe->buf_index)); do { } while (0); (typeof(sqe->buf_index))__x; }); }); + return 0; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void io_rw_done(struct kiocb *kiocb, ssize_t ret) +{ + switch (ret) { + case -529: + break; + case -512: + case -513: + case -514: + case -516: + + + + + + ret = -4; + + default: + kiocb->ki_complete(kiocb, ret, 0); + } +} + +static void kiocb_done(struct kiocb *kiocb, ssize_t ret, + struct io_comp_state *cs) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(kiocb); do { extern void __compiletime_assert_1759(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(kiocb)), typeof(((struct io_kiocb *)0)->rw.kiocb)) && !__builtin_types_compatible_p(typeof(*(kiocb)), typeof(void))))) __compiletime_assert_1759(); } while 
(0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, rw.kiocb))); }); + + if (req->flags & REQ_F_CUR_POS) + req->file->f_pos = kiocb->ki_pos; + if (ret >= 0 && kiocb->ki_complete == io_complete_rw) + __io_complete_rw(req, ret, 0, cs); + else + io_rw_done(kiocb, ret); +} + +static ssize_t io_import_fixed(struct io_kiocb *req, int rw, + struct iov_iter *iter) +{ + struct io_ring_ctx *ctx = req->ctx; + size_t len = req->rw.len; + struct io_mapped_ubuf *imu; + u16 index, buf_index; + size_t offset; + u64 buf_addr; + + + if (__builtin_expect(!!(!ctx->user_bufs), 0)) + return -14; + + buf_index = req->buf_index; + if (__builtin_expect(!!(buf_index >= ctx->nr_user_bufs), 0)) + return -14; + + index = ({ typeof(buf_index) _i = (buf_index); typeof(ctx->nr_user_bufs) _s = (ctx->nr_user_bufs); unsigned long _mask = array_index_mask_nospec(_i, _s); do { extern void __compiletime_assert_1760(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(_i) > sizeof(long)"))); if (!(!(sizeof(_i) > sizeof(long)))) __compiletime_assert_1760(); } while (0); do { extern void __compiletime_assert_1761(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(_s) > sizeof(long)"))); if (!(!(sizeof(_s) > sizeof(long)))) __compiletime_assert_1761(); } while (0); (typeof(_i)) (_i & _mask); }); + imu = &ctx->user_bufs[index]; + buf_addr = req->rw.addr; + + + if (buf_addr + len < buf_addr) + return -14; + + if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len) + return -14; + + + + + + offset = buf_addr - imu->ubuf; + iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len); + + if (offset) { +# 2545 "fs/io_uring.c" + const struct bio_vec *bvec = imu->bvec; + + if (offset <= bvec->bv_len) { + iov_iter_advance(iter, offset); + } else { + unsigned long seg_skip; + + + offset -= bvec->bv_len; + seg_skip = 1 + (offset >> 12); + + iter->bvec = bvec + seg_skip; + iter->nr_segs -= seg_skip; + iter->count -= bvec->bv_len + offset; + iter->iov_offset = offset & ~(~(((1UL) << 12)-1)); + } + } + + return len; +} + +static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock) +{ + if (needs_lock) + mutex_unlock(&ctx->uring_lock); +} + +static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock) +{ + + + + + + + if (needs_lock) + mutex_lock_nested(&ctx->uring_lock, 0); +} + +static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, + int bgid, struct io_buffer *kbuf, + bool needs_lock) +{ + struct io_buffer *head; + + if (req->flags & REQ_F_BUFFER_SELECTED) + return kbuf; + + io_ring_submit_lock(req->ctx, needs_lock); + + do { ({ int __ret_warn_on = !!(debug_locks && !lock_is_held(&(&req->ctx->uring_lock)->dep_map)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1762)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("fs/io_uring.c"), "i" (2595), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1763)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" 
(1764)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0); + + head = idr_find(&req->ctx->io_buffer_idr, bgid); + if (head) { + if (!list_empty(&head->list)) { + kbuf = ({ void *__mptr = (void *)((&head->list)->prev); do { extern void __compiletime_assert_1765(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&head->list)->prev)), typeof(((struct io_buffer *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&head->list)->prev)), typeof(void))))) __compiletime_assert_1765(); } while (0); ((struct io_buffer *)(__mptr - __builtin_offsetof(struct io_buffer, list))); }) + ; + list_del(&kbuf->list); + } else { + kbuf = head; + idr_remove(&req->ctx->io_buffer_idr, bgid); + } + if (*len > kbuf->len) + *len = kbuf->len; + } else { + kbuf = ERR_PTR(-105); + } + + io_ring_submit_unlock(req->ctx, needs_lock); + + return kbuf; +} + +static void *io_rw_buffer_select(struct io_kiocb *req, size_t *len, + bool needs_lock) +{ + struct io_buffer *kbuf; + u16 bgid; + + kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; + bgid = req->buf_index; + kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); + if (IS_ERR(kbuf)) + return kbuf; + req->rw.addr = (u64) (unsigned long) kbuf; + req->flags |= REQ_F_BUFFER_SELECTED; + return ( { ({ u64 __dummy; typeof((kbuf->addr)) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(kbuf->addr); } ); +} + + +static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, + bool needs_lock) +{ + struct compat_iovec *uiov; + compat_ssize_t clen; + void *buf; + ssize_t len; + + uiov = ( { ({ u64 __dummy; typeof((req->rw.addr)) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(req->rw.addr); } ); + if (!({ ({ int __ret_warn_on = !!(!(!(preempt_count() & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4)) | (((1UL << (4))-1) << ((0 + 8) + 8)) | (1UL << (0 + 8))))) && !pagefault_disabled()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1766)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("fs/io_uring.c"), "i" (2644), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1767)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1768)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); __builtin_expect(!!(!({ (void)0; __chk_range_not_ok((unsigned long )(uiov), sizeof(*uiov), (get_current()->thread.addr_limit.seg)); })), 1); })) + return -14; + if (({ int __gu_err; __typeof__( __builtin_choose_expr(sizeof(*((&uiov->iov_len)))<=sizeof(char),(unsigned char)0,__builtin_choose_expr(sizeof(*((&uiov->iov_len)))<=sizeof(short),(unsigned short)0,__builtin_choose_expr(sizeof(*((&uiov->iov_len)))<=sizeof(int),(unsigned int)0,__builtin_choose_expr(sizeof(*((&uiov->iov_len)))<=sizeof(long),(unsigned long)0,0ULL))))) __gu_val; __typeof__((&uiov->iov_len)) __gu_ptr = ((&uiov->iov_len)); __typeof__(sizeof(*(&uiov->iov_len))) __gu_size = 
[... elided: a long stretch of fully preprocessed C whose embedded linemarker `# 3137 "fs/io_uring.c"` identifies it as a snapshot of the Linux kernel's fs/io_uring.c (circa v5.8), evidently included as analysis input. This portion spans the buffer-selection import helpers (io_compat_import, __io_iov_buffer_select, io_iov_buffer_select, io_import_iovec), the blocking read/write fallback loop_rw_iter, async-context setup (io_req_map_rw, io_alloc_async_ctx, io_setup_async_rw), the read/write paths and their retry machinery (io_read_prep, io_async_buf_cancel, io_async_buf_retry, io_async_buf_func, io_rw_should_retry, io_iter_do_read, io_read, io_write_prep, io_write), splice/tee and their prep functions (__io_splice_prep, io_tee_prep, io_tee, io_splice_prep, io_splice), io_nop, fsync (io_prep_fsync, io_fsync), fallocate (io_fallocate_prep, io_fallocate), openat/openat2 (__io_openat_prep, io_openat_prep, io_openat2_prep, io_openat2, io_openat), the provide/remove buffer handlers (io_remove_buffers_prep, __io_remove_buffers, io_remove_buffers, io_provide_buffers_prep, io_add_buffers, io_provide_buffers), and the start of io_epoll_ctl_prep. Kernel macros such as READ_ONCE, container_of, get_user, and WARN_ON appear in fully macro-expanded form, which is the representation that CIL-based tools such as eba parse. ...]
char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }))) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(({ do { extern void __compiletime_assert_1813(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1813(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); })); } ); + if (copy_from_user(&req->epoll.event, ev, sizeof(*ev))) + return -14; + } + + return 0; + + + +} + +static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + + struct io_epoll *ie = &req->epoll; + int ret; + + ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock); + if (force_nonblock && ret == -11) + return -11; + + if (ret < 0) + req_set_fail_links(req); + __io_req_complete(req, ret, 0, cs); + return 0; + + + +} + +static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + + if (sqe->ioprio || sqe->buf_index || sqe->off) + return -22; + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + + req->madvise.addr = ({ do { extern void __compiletime_assert_1814(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1814(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned 
long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }); + req->madvise.len = ({ do { extern void __compiletime_assert_1815(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->len) == sizeof(char) || sizeof(sqe->len) == sizeof(short) || sizeof(sqe->len) == sizeof(int) || sizeof(sqe->len) == sizeof(long)) || sizeof(sqe->len) == sizeof(long long))) __compiletime_assert_1815(); } while (0); ({ typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) __x = (*(const volatile typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) *)&(sqe->len)); do { } while (0); (typeof(sqe->len))__x; }); }); + req->madvise.advice = ({ do { extern void __compiletime_assert_1816(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->fadvise_advice) == sizeof(char) || sizeof(sqe->fadvise_advice) == sizeof(short) || sizeof(sqe->fadvise_advice) == sizeof(int) || sizeof(sqe->fadvise_advice) == sizeof(long)) || sizeof(sqe->fadvise_advice) == sizeof(long long))) __compiletime_assert_1816(); } while (0); ({ typeof( _Generic((sqe->fadvise_advice), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fadvise_advice))) __x = (*(const volatile typeof( _Generic((sqe->fadvise_advice), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fadvise_advice))) *)&(sqe->fadvise_advice)); do { } while (0); (typeof(sqe->fadvise_advice))__x; }); }); + return 0; + + + +} + +static int io_madvise(struct io_kiocb *req, bool force_nonblock) +{ + + struct io_madvise *ma = &req->madvise; + int ret; + + if (force_nonblock) + return -11; + + ret = do_madvise(ma->addr, ma->len, ma->advice); + if (ret < 0) + 
req_set_fail_links(req); + io_req_complete(req, ret); + return 0; + + + +} + +static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + if (sqe->ioprio || sqe->buf_index || sqe->addr) + return -22; + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + + req->fadvise.offset = ({ do { extern void __compiletime_assert_1817(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->off) == sizeof(char) || sizeof(sqe->off) == sizeof(short) || sizeof(sqe->off) == sizeof(int) || sizeof(sqe->off) == sizeof(long)) || sizeof(sqe->off) == sizeof(long long))) __compiletime_assert_1817(); } while (0); ({ typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) __x = (*(const volatile typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) *)&(sqe->off)); do { } while (0); (typeof(sqe->off))__x; }); }); + req->fadvise.len = ({ do { extern void __compiletime_assert_1818(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->len) == sizeof(char) || sizeof(sqe->len) == sizeof(short) || sizeof(sqe->len) == sizeof(int) || sizeof(sqe->len) == sizeof(long)) || sizeof(sqe->len) == sizeof(long long))) __compiletime_assert_1818(); } while (0); ({ typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) __x = (*(const volatile typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) *)&(sqe->len)); do { } while (0); (typeof(sqe->len))__x; }); }); + req->fadvise.advice = ({ do { extern void __compiletime_assert_1819(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->fadvise_advice) == sizeof(char) || sizeof(sqe->fadvise_advice) == sizeof(short) || sizeof(sqe->fadvise_advice) == sizeof(int) || sizeof(sqe->fadvise_advice) == sizeof(long)) || sizeof(sqe->fadvise_advice) == sizeof(long long))) __compiletime_assert_1819(); } while (0); ({ typeof( _Generic((sqe->fadvise_advice), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: 
(unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fadvise_advice))) __x = (*(const volatile typeof( _Generic((sqe->fadvise_advice), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fadvise_advice))) *)&(sqe->fadvise_advice)); do { } while (0); (typeof(sqe->fadvise_advice))__x; }); }); + return 0; +} + +static int io_fadvise(struct io_kiocb *req, bool force_nonblock) +{ + struct io_fadvise *fa = &req->fadvise; + int ret; + + if (force_nonblock) { + switch (fa->advice) { + case 0: + case 1: + case 2: + break; + default: + return -11; + } + } + + ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); + if (ret < 0) + req_set_fail_links(req); + io_req_complete(req, ret); + return 0; +} + +static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + if (sqe->ioprio || sqe->buf_index) + return -22; + if (req->flags & REQ_F_FIXED_FILE) + return -9; + + req->statx.dfd = ({ do { extern void __compiletime_assert_1820(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->fd) == sizeof(char) || sizeof(sqe->fd) == sizeof(short) || sizeof(sqe->fd) == sizeof(int) || sizeof(sqe->fd) == sizeof(long)) || sizeof(sqe->fd) == sizeof(long long))) __compiletime_assert_1820(); } while (0); ({ typeof( _Generic((sqe->fd), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fd))) __x = (*(const volatile typeof( _Generic((sqe->fd), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fd))) *)&(sqe->fd)); do { } while (0); (typeof(sqe->fd))__x; }); }); + req->statx.mask = ({ do { extern void __compiletime_assert_1821(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->len) == sizeof(char) || sizeof(sqe->len) == sizeof(short) || sizeof(sqe->len) == sizeof(int) || sizeof(sqe->len) == sizeof(long)) || sizeof(sqe->len) == sizeof(long long))) __compiletime_assert_1821(); } while (0); ({ typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) __x = (*(const volatile typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: 
(unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) *)&(sqe->len)); do { } while (0); (typeof(sqe->len))__x; }); }); + req->statx.filename = ( { ({ u64 __dummy; typeof((({ do { extern void __compiletime_assert_1822(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1822(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }))) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(({ do { extern void __compiletime_assert_1822(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1822(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); })); } ); + req->statx.buffer = ( { ({ u64 __dummy; typeof((({ do { extern void __compiletime_assert_1823(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr2) == sizeof(char) || sizeof(sqe->addr2) == sizeof(short) || sizeof(sqe->addr2) == sizeof(int) || sizeof(sqe->addr2) == sizeof(long)) || sizeof(sqe->addr2) == sizeof(long long))) __compiletime_assert_1823(); } while (0); ({ typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned 
short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) __x = (*(const volatile typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) *)&(sqe->addr2)); do { } while (0); (typeof(sqe->addr2))__x; }); }))) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(({ do { extern void __compiletime_assert_1823(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr2) == sizeof(char) || sizeof(sqe->addr2) == sizeof(short) || sizeof(sqe->addr2) == sizeof(int) || sizeof(sqe->addr2) == sizeof(long)) || sizeof(sqe->addr2) == sizeof(long long))) __compiletime_assert_1823(); } while (0); ({ typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) __x = (*(const volatile typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) *)&(sqe->addr2)); do { } while (0); (typeof(sqe->addr2))__x; }); })); } ); + req->statx.flags = ({ do { extern void __compiletime_assert_1824(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->statx_flags) == sizeof(char) || sizeof(sqe->statx_flags) == sizeof(short) || sizeof(sqe->statx_flags) == sizeof(int) || sizeof(sqe->statx_flags) == sizeof(long)) || sizeof(sqe->statx_flags) == sizeof(long long))) __compiletime_assert_1824(); } while (0); ({ typeof( _Generic((sqe->statx_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->statx_flags))) __x = (*(const volatile typeof( _Generic((sqe->statx_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->statx_flags))) *)&(sqe->statx_flags)); do { } while (0); (typeof(sqe->statx_flags))__x; }); }); + + return 0; +} + +static int io_statx(struct io_kiocb *req, bool force_nonblock) +{ + 
struct io_statx *ctx = &req->statx; + int ret; + + if (force_nonblock) { + + if (ctx->dfd == -1 || ctx->dfd == -100) + req->flags |= REQ_F_NO_FILE_TABLE; + return -11; + } + + ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask, + ctx->buffer); + + if (ret < 0) + req_set_fail_links(req); + io_req_complete(req, ret); + return 0; +} + +static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + + + + + + io_req_init_async(req); + req->work.flags |= IO_WQ_WORK_NO_CANCEL; + + if (__builtin_expect(!!(req->ctx->flags & ((1U << 0)|(1U << 1))), 0)) + return -22; + if (sqe->ioprio || sqe->off || sqe->addr || sqe->len || + sqe->rw_flags || sqe->buf_index) + return -22; + if (req->flags & REQ_F_FIXED_FILE) + return -9; + + req->close.fd = ({ do { extern void __compiletime_assert_1825(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->fd) == sizeof(char) || sizeof(sqe->fd) == sizeof(short) || sizeof(sqe->fd) == sizeof(int) || sizeof(sqe->fd) == sizeof(long)) || sizeof(sqe->fd) == sizeof(long long))) __compiletime_assert_1825(); } while (0); ({ typeof( _Generic((sqe->fd), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fd))) __x = (*(const volatile typeof( _Generic((sqe->fd), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fd))) *)&(sqe->fd)); do { } while (0); (typeof(sqe->fd))__x; }); }); + if ((req->file && req->file->f_op == &io_uring_fops) || + req->close.fd == req->ctx->ring_fd) + return -9; + + req->close.put_file = ((void *)0); + return 0; +} + +static int io_close(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + struct io_close *close = &req->close; + int ret; + + + if (!close->put_file) { + ret = __close_fd_get_file(close->fd, &close->put_file); + if (ret < 0) + return (ret == -2) ? 
-9 : ret; + } + + + if (close->put_file->f_op->flush && force_nonblock) { + + req->flags &= ~REQ_F_NOWAIT; + + req->flags |= REQ_F_NO_FILE_TABLE; + return -11; + } + + + ret = filp_close(close->put_file, req->work.files); + if (ret < 0) + req_set_fail_links(req); + fput(close->put_file); + close->put_file = ((void *)0); + __io_req_complete(req, ret, 0, cs); + return 0; +} + +static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!req->file) + return -9; + + if (__builtin_expect(!!(ctx->flags & (1U << 0)), 0)) + return -22; + if (__builtin_expect(!!(sqe->addr || sqe->ioprio || sqe->buf_index), 0)) + return -22; + + req->sync.off = ({ do { extern void __compiletime_assert_1826(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->off) == sizeof(char) || sizeof(sqe->off) == sizeof(short) || sizeof(sqe->off) == sizeof(int) || sizeof(sqe->off) == sizeof(long)) || sizeof(sqe->off) == sizeof(long long))) __compiletime_assert_1826(); } while (0); ({ typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) __x = (*(const volatile typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) *)&(sqe->off)); do { } while (0); (typeof(sqe->off))__x; }); }); + req->sync.len = ({ do { extern void __compiletime_assert_1827(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->len) == sizeof(char) || sizeof(sqe->len) == sizeof(short) || sizeof(sqe->len) == sizeof(int) || sizeof(sqe->len) == sizeof(long)) || sizeof(sqe->len) == sizeof(long long))) __compiletime_assert_1827(); } while (0); ({ typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) __x = (*(const volatile typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) *)&(sqe->len)); do { } while (0); (typeof(sqe->len))__x; }); }); + req->sync.flags = ({ do { extern void __compiletime_assert_1828(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->sync_range_flags) == sizeof(char) || sizeof(sqe->sync_range_flags) == sizeof(short) || sizeof(sqe->sync_range_flags) == sizeof(int) || 
sizeof(sqe->sync_range_flags) == sizeof(long)) || sizeof(sqe->sync_range_flags) == sizeof(long long))) __compiletime_assert_1828(); } while (0); ({ typeof( _Generic((sqe->sync_range_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->sync_range_flags))) __x = (*(const volatile typeof( _Generic((sqe->sync_range_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->sync_range_flags))) *)&(sqe->sync_range_flags)); do { } while (0); (typeof(sqe->sync_range_flags))__x; }); }); + return 0; +} + +static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) +{ + int ret; + + + if (force_nonblock) + return -11; + + ret = sync_file_range(req->file, req->sync.off, req->sync.len, + req->sync.flags); + if (ret < 0) + req_set_fail_links(req); + io_req_complete(req, ret); + return 0; +} + + +static int io_setup_async_msg(struct io_kiocb *req, + struct io_async_msghdr *kmsg) +{ + if (req->io) + return -11; + if (io_alloc_async_ctx(req)) { + if (kmsg->iov != kmsg->fast_iov) + kfree(kmsg->iov); + return -12; + } + req->flags |= REQ_F_NEED_CLEANUP; + memcpy(&req->io->msg, kmsg, sizeof(*kmsg)); + return -11; +} + +static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_sr_msg *sr = &req->sr_msg; + struct io_async_ctx *io = req->io; + int ret; + + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + + sr->msg_flags = ({ do { extern void __compiletime_assert_1829(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->msg_flags) == sizeof(char) || sizeof(sqe->msg_flags) == sizeof(short) || sizeof(sqe->msg_flags) == sizeof(int) || sizeof(sqe->msg_flags) == sizeof(long)) || sizeof(sqe->msg_flags) == sizeof(long long))) __compiletime_assert_1829(); } while (0); ({ typeof( _Generic((sqe->msg_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->msg_flags))) __x = (*(const volatile typeof( _Generic((sqe->msg_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->msg_flags))) *)&(sqe->msg_flags)); do { } while (0); (typeof(sqe->msg_flags))__x; }); }); + sr->msg = ( { ({ u64 __dummy; typeof((({ do { extern void __compiletime_assert_1830(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || 
sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1830(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }))) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(({ do { extern void __compiletime_assert_1830(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1830(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); })); } ); + sr->len = ({ do { extern void __compiletime_assert_1831(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->len) == sizeof(char) || sizeof(sqe->len) == sizeof(short) || sizeof(sqe->len) == sizeof(int) || sizeof(sqe->len) == sizeof(long)) || sizeof(sqe->len) == sizeof(long long))) __compiletime_assert_1831(); } while (0); ({ typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) __x = (*(const volatile typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long 
long)0, signed long long: (signed long long)0, default: (sqe->len))) *)&(sqe->len)); do { } while (0); (typeof(sqe->len))__x; }); }); + + + if (req->ctx->compat) + sr->msg_flags |= 0x80000000; + + + if (!io || req->opcode == IORING_OP_SEND) + return 0; + + if (req->flags & REQ_F_NEED_CLEANUP) + return 0; + + io->msg.iov = io->msg.fast_iov; + ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags, + &io->msg.iov); + if (!ret) + req->flags |= REQ_F_NEED_CLEANUP; + return ret; +} + +static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + struct io_async_msghdr *kmsg = ((void *)0); + struct socket *sock; + int ret; + + sock = sock_from_file(req->file, &ret); + if (sock) { + struct io_async_ctx io; + unsigned flags; + + if (req->io) { + kmsg = &req->io->msg; + kmsg->msg.msg_name = &req->io->msg.addr; + + if (!kmsg->iov) + kmsg->iov = kmsg->fast_iov; + kmsg->msg.msg_iter.iov = kmsg->iov; + } else { + struct io_sr_msg *sr = &req->sr_msg; + + kmsg = &io.msg; + kmsg->msg.msg_name = &io.msg.addr; + + io.msg.iov = io.msg.fast_iov; + ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg, + sr->msg_flags, &io.msg.iov); + if (ret) + return ret; + } + + flags = req->sr_msg.msg_flags; + if (flags & 0x40) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= 0x40; + + ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); + if (force_nonblock && ret == -11) + return io_setup_async_msg(req, kmsg); + if (ret == -512) + ret = -4; + } + + if (kmsg && kmsg->iov != kmsg->fast_iov) + kfree(kmsg->iov); + req->flags &= ~REQ_F_NEED_CLEANUP; + if (ret < 0) + req_set_fail_links(req); + __io_req_complete(req, ret, 0, cs); + return 0; +} + +static int io_send(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + struct socket *sock; + int ret; + + sock = sock_from_file(req->file, &ret); + if (sock) { + struct io_sr_msg *sr = &req->sr_msg; + struct msghdr msg; + struct iovec iov; + unsigned flags; + + ret = import_single_range(1, sr->buf, sr->len, &iov, + &msg.msg_iter); + if (ret) + return ret; + + msg.msg_name = ((void *)0); + msg.msg_control = ((void *)0); + msg.msg_controllen = 0; + msg.msg_namelen = 0; + + flags = req->sr_msg.msg_flags; + if (flags & 0x40) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= 0x40; + + msg.msg_flags = flags; + ret = sock_sendmsg(sock, &msg); + if (force_nonblock && ret == -11) + return -11; + if (ret == -512) + ret = -4; + } + + if (ret < 0) + req_set_fail_links(req); + __io_req_complete(req, ret, 0, cs); + return 0; +} + +static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io) +{ + struct io_sr_msg *sr = &req->sr_msg; + struct iovec *uiov; + size_t iov_len; + int ret; + + ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr, + &uiov, &iov_len); + if (ret) + return ret; + + if (req->flags & REQ_F_BUFFER_SELECT) { + if (iov_len > 1) + return -22; + if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov))) + return -14; + sr->len = io->msg.iov[0].iov_len; + iov_iter_init(&io->msg.msg.msg_iter, 0, io->msg.iov, 1, + sr->len); + io->msg.iov = ((void *)0); + } else { + ret = import_iovec(0, uiov, iov_len, 8, + &io->msg.iov, &io->msg.msg.msg_iter); + if (ret > 0) + ret = 0; + } + + return ret; +} + + +static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, + struct io_async_ctx *io) +{ + struct compat_msghdr *msg_compat; + struct io_sr_msg *sr = &req->sr_msg; + struct compat_iovec *uiov; + compat_uptr_t ptr; + compat_size_t len; + int ret; + + msg_compat = 
(struct compat_msghdr *) sr->msg; + ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr, + &ptr, &len); + if (ret) + return ret; + + uiov = compat_ptr(ptr); + if (req->flags & REQ_F_BUFFER_SELECT) { + compat_ssize_t clen; + + if (len > 1) + return -22; + if (!({ ({ int __ret_warn_on = !!(!(!(preempt_count() & ((((1UL << (4))-1) << (((0 + 8) + 8) + 4)) | (((1UL << (4))-1) << ((0 + 8) + 8)) | (1UL << (0 + 8))))) && !pagefault_disabled()); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1832)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("fs/io_uring.c"), "i" (4078), "i" ((1 << 0)|((1 << 1) | ((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1833)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1834)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); __builtin_expect(!!(!({ (void)0; __chk_range_not_ok((unsigned long )(uiov), sizeof(*uiov), (get_current()->thread.addr_limit.seg)); })), 1); })) + return -14; + if (({ int __gu_err; __typeof__( __builtin_choose_expr(sizeof(*((&uiov->iov_len)))<=sizeof(char),(unsigned char)0,__builtin_choose_expr(sizeof(*((&uiov->iov_len)))<=sizeof(short),(unsigned short)0,__builtin_choose_expr(sizeof(*((&uiov->iov_len)))<=sizeof(int),(unsigned int)0,__builtin_choose_expr(sizeof(*((&uiov->iov_len)))<=sizeof(long),(unsigned long)0,0ULL))))) __gu_val; __typeof__((&uiov->iov_len)) __gu_ptr = ((&uiov->iov_len)); __typeof__(sizeof(*(&uiov->iov_len))) __gu_size = (sizeof(*(&uiov->iov_len))); ({ stac(); asm volatile ("# ALT: oldnstr\n" "661:\n\t" "" "\n662:\n" "# ALT: padding\n" ".skip -(((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")) > 0) * " "((" "665""1""f-""664""1""f" ")-(" "662b-661b" ")),0x90\n" "663" ":\n" ".pushsection .altinstructions,\"a\"\n" " .long 661b - .\n" " .long " "664""1""f - .\n" " .word " "( 3*32+18)" "\n" " .byte " "663""b-661b" "\n" " .byte " "665""1""f-""664""1""f" "\n" " .byte " "663""b-662b" "\n" ".popsection\n" ".pushsection .altinstr_replacement, \"ax\"\n" "# ALT: replacement " "1" "\n" "664""1"":\n\t" "lfence" "\n" "665""1" ":\n" ".popsection\n" : : : "memory"); }); do { __gu_err = 0; (void)0; switch (__gu_size) { case 1: asm volatile("\n" "1: mov""b"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""b"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=q"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 2: asm volatile("\n" "1: mov""w"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""w"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " 
.popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 4: asm volatile("\n" "1: mov""l"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""l"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; case 8: asm volatile("\n" "1: mov""q"" %[umem],%[output]\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %[efault],%[errout]\n" " xor""q"" %[output],%[output]\n" " jmp 2b\n" ".previous\n" " .pushsection \"__ex_table\",\"a\"\n" " .balign 4\n" " .long (" "1b" ") - .\n" " .long (" "3b" ") - .\n" " .long (" "ex_handler_uaccess" ") - .\n" " .popsection\n" : [errout] "=r" (__gu_err), [output] "=r"(__gu_val) : [umem] "m" ((*(struct __large_struct *)(__gu_ptr))), [efault] "i" (-14), "0" (__gu_err)); break; default: (__gu_val) = __get_user_bad(); } } while (0); clac(); ((clen)) = ( __typeof__(*((&uiov->iov_len))))__gu_val; __builtin_expect(__gu_err, 0); })) + return -14; + if (clen < 0) + return -22; + sr->len = io->msg.iov[0].iov_len; + io->msg.iov = ((void *)0); + } else { + ret = compat_import_iovec(0, uiov, len, 8, + &io->msg.iov, + &io->msg.msg.msg_iter); + if (ret < 0) + return ret; + } + + return 0; +} + + +static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io) +{ + io->msg.iov = io->msg.fast_iov; + + + if (req->ctx->compat) + return __io_compat_recvmsg_copy_hdr(req, io); + + + return __io_recvmsg_copy_hdr(req, io); +} + +static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, + int *cflags, bool needs_lock) +{ + struct io_sr_msg *sr = &req->sr_msg; + struct io_buffer *kbuf; + + if (!(req->flags & REQ_F_BUFFER_SELECT)) + return ((void *)0); + + kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); + if (IS_ERR(kbuf)) + return kbuf; + + sr->kbuf = kbuf; + req->flags |= REQ_F_BUFFER_SELECTED; + + *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT; + *cflags |= (1U << 0); + return kbuf; +} + +static int io_recvmsg_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + struct io_sr_msg *sr = &req->sr_msg; + struct io_async_ctx *io = req->io; + int ret; + + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + + sr->msg_flags = ({ do { extern void __compiletime_assert_1835(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->msg_flags) == sizeof(char) || sizeof(sqe->msg_flags) == sizeof(short) || sizeof(sqe->msg_flags) == sizeof(int) || sizeof(sqe->msg_flags) == sizeof(long)) || sizeof(sqe->msg_flags) == sizeof(long long))) __compiletime_assert_1835(); } while (0); ({ typeof( _Generic((sqe->msg_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->msg_flags))) __x = (*(const volatile typeof( _Generic((sqe->msg_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned 
short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->msg_flags))) *)&(sqe->msg_flags)); do { } while (0); (typeof(sqe->msg_flags))__x; }); }); + sr->msg = ( { ({ u64 __dummy; typeof((({ do { extern void __compiletime_assert_1836(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1836(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }))) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(({ do { extern void __compiletime_assert_1836(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1836(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); })); } ); + sr->len = ({ do { extern void __compiletime_assert_1837(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->len) == sizeof(char) || sizeof(sqe->len) == sizeof(short) || sizeof(sqe->len) == sizeof(int) || sizeof(sqe->len) == sizeof(long)) || sizeof(sqe->len) == sizeof(long long))) __compiletime_assert_1837(); } while (0); ({ typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, 
unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) __x = (*(const volatile typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) *)&(sqe->len)); do { } while (0); (typeof(sqe->len))__x; }); }); + sr->bgid = ({ do { extern void __compiletime_assert_1838(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->buf_group) == sizeof(char) || sizeof(sqe->buf_group) == sizeof(short) || sizeof(sqe->buf_group) == sizeof(int) || sizeof(sqe->buf_group) == sizeof(long)) || sizeof(sqe->buf_group) == sizeof(long long))) __compiletime_assert_1838(); } while (0); ({ typeof( _Generic((sqe->buf_group), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->buf_group))) __x = (*(const volatile typeof( _Generic((sqe->buf_group), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->buf_group))) *)&(sqe->buf_group)); do { } while (0); (typeof(sqe->buf_group))__x; }); }); + + + if (req->ctx->compat) + sr->msg_flags |= 0x80000000; + + + if (!io || req->opcode == IORING_OP_RECV) + return 0; + + if (req->flags & REQ_F_NEED_CLEANUP) + return 0; + + ret = io_recvmsg_copy_hdr(req, io); + if (!ret) + req->flags |= REQ_F_NEED_CLEANUP; + return ret; +} + +static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + struct io_async_msghdr *kmsg = ((void *)0); + struct socket *sock; + int ret, cflags = 0; + + sock = sock_from_file(req->file, &ret); + if (sock) { + struct io_buffer *kbuf; + struct io_async_ctx io; + unsigned flags; + + if (req->io) { + kmsg = &req->io->msg; + kmsg->msg.msg_name = &req->io->msg.addr; + + if (!kmsg->iov) + kmsg->iov = kmsg->fast_iov; + kmsg->msg.msg_iter.iov = kmsg->iov; + } else { + kmsg = &io.msg; + kmsg->msg.msg_name = &io.msg.addr; + + ret = io_recvmsg_copy_hdr(req, &io); + if (ret) + return ret; + } + + kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock); + if (IS_ERR(kbuf)) { + return PTR_ERR(kbuf); + } else if (kbuf) { + kmsg->fast_iov[0].iov_base = ( { ({ u64 __dummy; typeof((kbuf->addr)) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(kbuf->addr); } ); + iov_iter_init(&kmsg->msg.msg_iter, 0, kmsg->iov, + 1, req->sr_msg.len); + } + + flags = req->sr_msg.msg_flags; + if (flags & 0x40) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= 0x40; + + ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg, + kmsg->uaddr, flags); + if (force_nonblock && 
ret == -11) + return io_setup_async_msg(req, kmsg); + if (ret == -512) + ret = -4; + } + + if (kmsg && kmsg->iov != kmsg->fast_iov) + kfree(kmsg->iov); + req->flags &= ~REQ_F_NEED_CLEANUP; + if (ret < 0) + req_set_fail_links(req); + __io_req_complete(req, ret, cflags, cs); + return 0; +} + +static int io_recv(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + struct io_buffer *kbuf = ((void *)0); + struct socket *sock; + int ret, cflags = 0; + + sock = sock_from_file(req->file, &ret); + if (sock) { + struct io_sr_msg *sr = &req->sr_msg; + void *buf = sr->buf; + struct msghdr msg; + struct iovec iov; + unsigned flags; + + kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); + else if (kbuf) + buf = ( { ({ u64 __dummy; typeof((kbuf->addr)) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(kbuf->addr); } ); + + ret = import_single_range(0, buf, sr->len, &iov, + &msg.msg_iter); + if (ret) { + kfree(kbuf); + return ret; + } + + req->flags |= REQ_F_NEED_CLEANUP; + msg.msg_name = ((void *)0); + msg.msg_control = ((void *)0); + msg.msg_controllen = 0; + msg.msg_namelen = 0; + msg.msg_iocb = ((void *)0); + msg.msg_flags = 0; + + flags = req->sr_msg.msg_flags; + if (flags & 0x40) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= 0x40; + + ret = sock_recvmsg(sock, &msg, flags); + if (force_nonblock && ret == -11) + return -11; + if (ret == -512) + ret = -4; + } + + kfree(kbuf); + req->flags &= ~REQ_F_NEED_CLEANUP; + if (ret < 0) + req_set_fail_links(req); + __io_req_complete(req, ret, cflags, cs); + return 0; +} + +static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_accept *accept = &req->accept; + + if (__builtin_expect(!!(req->ctx->flags & ((1U << 0)|(1U << 1))), 0)) + return -22; + if (sqe->ioprio || sqe->len || sqe->buf_index) + return -22; + + accept->addr = ( { ({ u64 __dummy; typeof((({ do { extern void __compiletime_assert_1839(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1839(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }))) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(({ do { extern void __compiletime_assert_1839(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || 
sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1839(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); })); } ); + accept->addr_len = ( { ({ u64 __dummy; typeof((({ do { extern void __compiletime_assert_1840(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr2) == sizeof(char) || sizeof(sqe->addr2) == sizeof(short) || sizeof(sqe->addr2) == sizeof(int) || sizeof(sqe->addr2) == sizeof(long)) || sizeof(sqe->addr2) == sizeof(long long))) __compiletime_assert_1840(); } while (0); ({ typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) __x = (*(const volatile typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) *)&(sqe->addr2)); do { } while (0); (typeof(sqe->addr2))__x; }); }))) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(({ do { extern void __compiletime_assert_1840(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr2) == sizeof(char) || sizeof(sqe->addr2) == sizeof(short) || sizeof(sqe->addr2) == sizeof(int) || sizeof(sqe->addr2) == sizeof(long)) || sizeof(sqe->addr2) == sizeof(long long))) __compiletime_assert_1840(); } while (0); ({ typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) __x = (*(const volatile typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed 
long long: (signed long long)0, default: (sqe->addr2))) *)&(sqe->addr2)); do { } while (0); (typeof(sqe->addr2))__x; }); })); } ); + accept->flags = ({ do { extern void __compiletime_assert_1841(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->accept_flags) == sizeof(char) || sizeof(sqe->accept_flags) == sizeof(short) || sizeof(sqe->accept_flags) == sizeof(int) || sizeof(sqe->accept_flags) == sizeof(long)) || sizeof(sqe->accept_flags) == sizeof(long long))) __compiletime_assert_1841(); } while (0); ({ typeof( _Generic((sqe->accept_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->accept_flags))) __x = (*(const volatile typeof( _Generic((sqe->accept_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->accept_flags))) *)&(sqe->accept_flags)); do { } while (0); (typeof(sqe->accept_flags))__x; }); }); + accept->nofile = rlimit(7); + return 0; +} + +static int io_accept(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + struct io_accept *accept = &req->accept; + unsigned int file_flags = force_nonblock ? 00004000 : 0; + int ret; + + if (req->file->f_flags & 00004000) + req->flags |= REQ_F_NOWAIT; + + ret = __sys_accept4_file(req->file, file_flags, accept->addr, + accept->addr_len, accept->flags, + accept->nofile); + if (ret == -11 && force_nonblock) + return -11; + if (ret < 0) { + if (ret == -512) + ret = -4; + req_set_fail_links(req); + } + __io_req_complete(req, ret, 0, cs); + return 0; +} + +static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_connect *conn = &req->connect; + struct io_async_ctx *io = req->io; + + if (__builtin_expect(!!(req->ctx->flags & ((1U << 0)|(1U << 1))), 0)) + return -22; + if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags) + return -22; + + conn->addr = ( { ({ u64 __dummy; typeof((({ do { extern void __compiletime_assert_1842(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1842(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: 
(unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }))) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(({ do { extern void __compiletime_assert_1842(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1842(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); })); } ); + conn->addr_len = ({ do { extern void __compiletime_assert_1843(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr2) == sizeof(char) || sizeof(sqe->addr2) == sizeof(short) || sizeof(sqe->addr2) == sizeof(int) || sizeof(sqe->addr2) == sizeof(long)) || sizeof(sqe->addr2) == sizeof(long long))) __compiletime_assert_1843(); } while (0); ({ typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) __x = (*(const volatile typeof( _Generic((sqe->addr2), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr2))) *)&(sqe->addr2)); do { } while (0); (typeof(sqe->addr2))__x; }); }); + + if (!io) + return 0; + + return move_addr_to_kernel(conn->addr, conn->addr_len, + &io->connect.address); +} + +static int io_connect(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + struct io_async_ctx __io, *io; + unsigned file_flags; + int ret; + + if (req->io) { + io = req->io; + } else { + ret = move_addr_to_kernel(req->connect.addr, + req->connect.addr_len, + &__io.connect.address); + if (ret) + goto out; + io = &__io; + } + + file_flags = force_nonblock ? 
00004000 : 0; + + ret = __sys_connect_file(req->file, &io->connect.address, + req->connect.addr_len, file_flags); + if ((ret == -11 || ret == -115) && force_nonblock) { + if (req->io) + return -11; + if (io_alloc_async_ctx(req)) { + ret = -12; + goto out; + } + memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect)); + return -11; + } + if (ret == -512) + ret = -4; +out: + if (ret < 0) + req_set_fail_links(req); + __io_req_complete(req, ret, 0, cs); + return 0; +} +# 4440 "fs/io_uring.c" +struct io_poll_table { + struct poll_table_struct pt; + struct io_kiocb *req; + int error; +}; + +static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, + __poll_t mask, task_work_func_t func) +{ + int ret; + + + if (mask && !(mask & poll->events)) + return 0; + + trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); + + list_del_init(&poll->wait.entry); + + req->result = mask; + init_task_work(&req->task_work, func); + + + + + + + ret = io_req_task_work_add(req, &req->task_work); + if (__builtin_expect(!!(ret), 0)) { + struct task_struct *tsk; + + do { do { extern void __compiletime_assert_1844(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(poll->canceled) == sizeof(char) || sizeof(poll->canceled) == sizeof(short) || sizeof(poll->canceled) == sizeof(int) || sizeof(poll->canceled) == sizeof(long)) || sizeof(poll->canceled) == sizeof(long long))) __compiletime_assert_1844(); } while (0); do { *(volatile typeof(poll->canceled) *)&(poll->canceled) = (true); } while (0); } while (0); + tsk = io_wq_get_task(req->ctx->io_wq); + task_work_add(tsk, &req->task_work, 0); + wake_up_process(tsk); + } + return 1; +} + +static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) + +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!req->result && !({ do { extern void __compiletime_assert_1845(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(poll->canceled) == sizeof(char) || sizeof(poll->canceled) == sizeof(short) || sizeof(poll->canceled) == sizeof(int) || sizeof(poll->canceled) == sizeof(long)) || sizeof(poll->canceled) == sizeof(long long))) __compiletime_assert_1845(); } while (0); ({ typeof( _Generic((poll->canceled), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (poll->canceled))) __x = (*(const volatile typeof( _Generic((poll->canceled), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (poll->canceled))) *)&(poll->canceled)); do { } while (0); (typeof(poll->canceled))__x; }); })) { + struct poll_table_struct pt = { ._key = poll->events }; + + req->result = vfs_poll(req->file, &pt) & poll->events; + } + + spin_lock_irq(&ctx->completion_lock); + if (!req->result && !({ do { extern void __compiletime_assert_1846(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(poll->canceled) == sizeof(char) || 
sizeof(poll->canceled) == sizeof(short) || sizeof(poll->canceled) == sizeof(int) || sizeof(poll->canceled) == sizeof(long)) || sizeof(poll->canceled) == sizeof(long long))) __compiletime_assert_1846(); } while (0); ({ typeof( _Generic((poll->canceled), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (poll->canceled))) __x = (*(const volatile typeof( _Generic((poll->canceled), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (poll->canceled))) *)&(poll->canceled)); do { } while (0); (typeof(poll->canceled))__x; }); })) { + add_wait_queue(poll->head, &poll->wait); + return true; + } + + return false; +} + +static void io_poll_remove_double(struct io_kiocb *req) +{ + struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + + do { ({ int __ret_warn_on = !!(debug_locks && !lock_is_held(&(&req->ctx->completion_lock)->dep_map)); if (__builtin_expect(!!(__ret_warn_on), 0)) do { ({ asm volatile("%c0:\n\t" ".pushsection .discard.instr_begin\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1847)); }); do { asm volatile("1:\t" ".byte 0x0f, 0x0b" "\n" ".pushsection __bug_table,\"aw\"\n" "2:\t" ".long " "1b" " - 2b" "\t# bug_entry::bug_addr\n" "\t" ".long " "%c0" " - 2b" "\t# bug_entry::file\n" "\t.word %c1" "\t# bug_entry::line\n" "\t.word %c2" "\t# bug_entry::flags\n" "\t.org 2b+%c3\n" ".popsection" : : "i" ("fs/io_uring.c"), "i" (4503), "i" ((1 << 0)|(((9) << 8))), "i" (sizeof(struct bug_entry))); } while (0); ({ asm volatile("%c0:\n\t" ".pushsection .discard.reachable\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1848)); }); ({ asm volatile("%c0: nop\n\t" ".pushsection .discard.instr_end\n\t" ".long %c0b - .\n\t" ".popsection\n\t" : : "i" (1849)); }); } while (0); __builtin_expect(!!(__ret_warn_on), 0); }); } while (0); + + if (poll && poll->head) { + struct wait_queue_head *head = poll->head; + + spin_lock(&head->lock); + list_del_init(&poll->wait.entry); + if (poll->wait.private) + refcount_dec(&req->refs); + poll->head = ((void *)0); + spin_unlock(&head->lock); + } +} + +static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) +{ + struct io_ring_ctx *ctx = req->ctx; + + io_poll_remove_double(req); + req->poll.done = true; + io_cqring_fill_event(req, error ? 
error : mangle_poll(mask)); + io_commit_cqring(ctx); +} + +static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (io_poll_rewait(req, &req->poll)) { + spin_unlock_irq(&ctx->completion_lock); + return; + } + + hash_del(&req->hash_node); + io_poll_complete(req, req->result, 0); + req->flags |= REQ_F_COMP_LOCKED; + *nxt = io_put_req_find_next(req); + spin_unlock_irq(&ctx->completion_lock); + + io_cqring_ev_posted(ctx); +} + +static void io_poll_task_func(struct callback_head *cb) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(cb); do { extern void __compiletime_assert_1850(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(cb)), typeof(((struct io_kiocb *)0)->task_work)) && !__builtin_types_compatible_p(typeof(*(cb)), typeof(void))))) __compiletime_assert_1850(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, task_work))); }); + struct io_kiocb *nxt = ((void *)0); + + io_poll_task_handler(req, &nxt); + if (nxt) + __io_req_task_submit(nxt); +} + +static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, + int sync, void *key) +{ + struct io_kiocb *req = wait->private; + struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + __poll_t mask = (( __poll_t)(uintptr_t)(void *)(key)); + + + if (mask && !(mask & poll->events)) + return 0; + + if (req->poll.head) { + bool done; + + spin_lock(&req->poll.head->lock); + done = list_empty(&req->poll.wait.entry); + if (!done) + list_del_init(&req->poll.wait.entry); + spin_unlock(&req->poll.head->lock); + if (!done) + __io_async_wake(req, poll, mask, io_poll_task_func); + } + refcount_dec(&req->refs); + return 1; +} + +static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, + wait_queue_func_t wake_func) +{ + poll->head = ((void *)0); + poll->done = false; + poll->canceled = false; + poll->events = events; + INIT_LIST_HEAD(&poll->wait.entry); + init_waitqueue_func_entry(&poll->wait, wake_func); +} + +static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, + struct wait_queue_head *head) +{ + struct io_kiocb *req = pt->req; + + + + + + + if (__builtin_expect(!!(poll->head), 0)) { + + if (req->io) { + pt->error = -22; + return; + } + poll = kmalloc(sizeof(*poll), ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); + if (!poll) { + pt->error = -12; + return; + } + io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake); + refcount_inc(&req->refs); + poll->wait.private = req; + req->io = (void *) poll; + } + + pt->error = 0; + poll->head = head; + + if (poll->events & (( __poll_t)(1U << 28))) + add_wait_queue_exclusive(head, &poll->wait); + else + add_wait_queue(head, &poll->wait); +} + +static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, + struct poll_table_struct *p) +{ + struct io_poll_table *pt = ({ void *__mptr = (void *)(p); do { extern void __compiletime_assert_1851(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(p)), typeof(((struct io_poll_table *)0)->pt)) && !__builtin_types_compatible_p(typeof(*(p)), typeof(void))))) __compiletime_assert_1851(); } while (0); ((struct io_poll_table *)(__mptr - __builtin_offsetof(struct io_poll_table, pt))); }); + + __io_queue_proc(&pt->req->apoll->poll, pt, head); +} + +static void io_async_task_func(struct callback_head *cb) +{ + struct io_kiocb *req = ({ 
void *__mptr = (void *)(cb); do { extern void __compiletime_assert_1852(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(cb)), typeof(((struct io_kiocb *)0)->task_work)) && !__builtin_types_compatible_p(typeof(*(cb)), typeof(void))))) __compiletime_assert_1852(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, task_work))); }); + struct async_poll *apoll = req->apoll; + struct io_ring_ctx *ctx = req->ctx; + + trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); + + if (io_poll_rewait(req, &apoll->poll)) { + spin_unlock_irq(&ctx->completion_lock); + return; + } + + + if (hash_hashed(&req->hash_node)) + hash_del(&req->hash_node); + + spin_unlock_irq(&ctx->completion_lock); + + + if (req->flags & REQ_F_WORK_INITIALIZED) + memcpy(&req->work, &apoll->work, sizeof(req->work)); + + if (!({ do { extern void __compiletime_assert_1853(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(apoll->poll.canceled) == sizeof(char) || sizeof(apoll->poll.canceled) == sizeof(short) || sizeof(apoll->poll.canceled) == sizeof(int) || sizeof(apoll->poll.canceled) == sizeof(long)) || sizeof(apoll->poll.canceled) == sizeof(long long))) __compiletime_assert_1853(); } while (0); ({ typeof( _Generic((apoll->poll.canceled), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (apoll->poll.canceled))) __x = (*(const volatile typeof( _Generic((apoll->poll.canceled), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (apoll->poll.canceled))) *)&(apoll->poll.canceled)); do { } while (0); (typeof(apoll->poll.canceled))__x; }); })) + __io_req_task_submit(req); + else + __io_req_task_cancel(req, -125); + + kfree(apoll); +} + +static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, + void *key) +{ + struct io_kiocb *req = wait->private; + struct io_poll_iocb *poll = &req->apoll->poll; + + trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data, + (( __poll_t)(uintptr_t)(void *)(key))); + + return __io_async_wake(req, poll, (( __poll_t)(uintptr_t)(void *)(key)), io_async_task_func); +} + +static void io_poll_req_insert(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct hlist_head *list; + + list = &ctx->cancel_hash[hash_64_generic(req->user_data, ctx->cancel_hash_bits)]; + hlist_add_head(&req->hash_node, list); +} + +static __poll_t __io_arm_poll_handler(struct io_kiocb *req, + struct io_poll_iocb *poll, + struct io_poll_table *ipt, __poll_t mask, + wait_queue_func_t wake_func) + +{ + struct io_ring_ctx *ctx = req->ctx; + bool cancel = false; + + io_init_poll_iocb(poll, mask, wake_func); + poll->file = req->file; + poll->wait.private = req; + + ipt->pt._key = mask; + ipt->req = req; + ipt->error = -22; + + mask = vfs_poll(req->file, &ipt->pt) & poll->events; + + spin_lock_irq(&ctx->completion_lock); + if 
(__builtin_expect(!!(poll->head), 1)) { + spin_lock(&poll->head->lock); + if (__builtin_expect(!!(list_empty(&poll->wait.entry)), 0)) { + if (ipt->error) + cancel = true; + ipt->error = 0; + mask = 0; + } + if (mask || ipt->error) + list_del_init(&poll->wait.entry); + else if (cancel) + do { do { extern void __compiletime_assert_1854(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(poll->canceled) == sizeof(char) || sizeof(poll->canceled) == sizeof(short) || sizeof(poll->canceled) == sizeof(int) || sizeof(poll->canceled) == sizeof(long)) || sizeof(poll->canceled) == sizeof(long long))) __compiletime_assert_1854(); } while (0); do { *(volatile typeof(poll->canceled) *)&(poll->canceled) = (true); } while (0); } while (0); + else if (!poll->done) + io_poll_req_insert(req); + spin_unlock(&poll->head->lock); + } + + return mask; +} + +static bool io_arm_poll_handler(struct io_kiocb *req) +{ + const struct io_op_def *def = &io_op_defs[req->opcode]; + struct io_ring_ctx *ctx = req->ctx; + struct async_poll *apoll; + struct io_poll_table ipt; + __poll_t mask, ret; + bool had_io; + + if (!req->file || !file_can_poll(req->file)) + return false; + if (req->flags & REQ_F_POLLED) + return false; + if (!def->pollin && !def->pollout) + return false; + + apoll = kmalloc(sizeof(*apoll), ((( gfp_t)0x20u)|(( gfp_t)0x200u)|(( gfp_t)0x800u))); + if (__builtin_expect(!!(!apoll), 0)) + return false; + + req->flags |= REQ_F_POLLED; + if (req->flags & REQ_F_WORK_INITIALIZED) + memcpy(&apoll->work, &req->work, sizeof(req->work)); + had_io = req->io != ((void *)0); + + io_get_req_task(req); + req->apoll = apoll; + INIT_HLIST_NODE(&req->hash_node); + + mask = 0; + if (def->pollin) + mask |= 0x0001 | 0x0040; + if (def->pollout) + mask |= 0x0004 | 0x0100; + mask |= 0x0008 | 0x0002; + + ipt.pt._qproc = io_async_queue_proc; + + ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, + io_async_wake); + if (ret) { + ipt.error = 0; + + if (!had_io) + io_poll_remove_double(req); + spin_unlock_irq(&ctx->completion_lock); + if (req->flags & REQ_F_WORK_INITIALIZED) + memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll); + return false; + } + spin_unlock_irq(&ctx->completion_lock); + trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask, + apoll->poll.events); + return true; +} + +static bool __io_poll_remove_one(struct io_kiocb *req, + struct io_poll_iocb *poll) +{ + bool do_complete = false; + + spin_lock(&poll->head->lock); + do { do { extern void __compiletime_assert_1855(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(poll->canceled) == sizeof(char) || sizeof(poll->canceled) == sizeof(short) || sizeof(poll->canceled) == sizeof(int) || sizeof(poll->canceled) == sizeof(long)) || sizeof(poll->canceled) == sizeof(long long))) __compiletime_assert_1855(); } while (0); do { *(volatile typeof(poll->canceled) *)&(poll->canceled) = (true); } while (0); } while (0); + if (!list_empty(&poll->wait.entry)) { + list_del_init(&poll->wait.entry); + do_complete = true; + } + spin_unlock(&poll->head->lock); + hash_del(&req->hash_node); + return do_complete; +} + +static bool io_poll_remove_one(struct io_kiocb *req) +{ + bool do_complete; + + if (req->opcode == IORING_OP_POLL_ADD) { + io_poll_remove_double(req); + do_complete = __io_poll_remove_one(req, &req->poll); + } else { + struct async_poll *apoll = req->apoll; + + + do_complete = __io_poll_remove_one(req, &apoll->poll); + if (do_complete) { + 
io_put_req(req); + + + + + + if (req->flags & REQ_F_WORK_INITIALIZED) + memcpy(&req->work, &apoll->work, + sizeof(req->work)); + kfree(apoll); + } + } + + if (do_complete) { + io_cqring_fill_event(req, -125); + io_commit_cqring(req->ctx); + req->flags |= REQ_F_COMP_LOCKED; + io_put_req(req); + } + + return do_complete; +} + +static void io_poll_remove_all(struct io_ring_ctx *ctx) +{ + struct hlist_node *tmp; + struct io_kiocb *req; + int posted = 0, i; + + spin_lock_irq(&ctx->completion_lock); + for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { + struct hlist_head *list; + + list = &ctx->cancel_hash[i]; + for (req = ({ typeof((list)->first) ____ptr = ((list)->first); ____ptr ? ({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1856(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*req) *)0)->hash_node)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1856(); } while (0); ((typeof(*req) *)(__mptr - __builtin_offsetof(typeof(*req), hash_node))); }) : ((void *)0); }); req && ({ tmp = req->hash_node.next; 1; }); req = ({ typeof(tmp) ____ptr = (tmp); ____ptr ? ({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1857(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*req) *)0)->hash_node)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1857(); } while (0); ((typeof(*req) *)(__mptr - __builtin_offsetof(typeof(*req), hash_node))); }) : ((void *)0); })) + posted += io_poll_remove_one(req); + } + spin_unlock_irq(&ctx->completion_lock); + + if (posted) + io_cqring_ev_posted(ctx); +} + +static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) +{ + struct hlist_head *list; + struct io_kiocb *req; + + list = &ctx->cancel_hash[hash_64_generic(sqe_addr, ctx->cancel_hash_bits)]; + for (req = ({ typeof((list)->first) ____ptr = ((list)->first); ____ptr ? ({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1858(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(req)) *)0)->hash_node)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1858(); } while (0); ((typeof(*(req)) *)(__mptr - __builtin_offsetof(typeof(*(req)), hash_node))); }) : ((void *)0); }); req; req = ({ typeof((req)->hash_node.next) ____ptr = ((req)->hash_node.next); ____ptr ? 
({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1859(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(req)) *)0)->hash_node)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1859(); } while (0); ((typeof(*(req)) *)(__mptr - __builtin_offsetof(typeof(*(req)), hash_node))); }) : ((void *)0); })) { + if (sqe_addr != req->user_data) + continue; + if (io_poll_remove_one(req)) + return 0; + return -114; + } + + return -2; +} + +static int io_poll_remove_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || + sqe->poll_events) + return -22; + + req->poll.addr = ({ do { extern void __compiletime_assert_1860(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1860(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }); + return 0; +} + + + + + +static int io_poll_remove(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + u64 addr; + int ret; + + addr = req->poll.addr; + spin_lock_irq(&ctx->completion_lock); + ret = io_poll_cancel(ctx, addr); + spin_unlock_irq(&ctx->completion_lock); + + if (ret < 0) + req_set_fail_links(req); + io_req_complete(req, ret); + return 0; +} + +static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, + void *key) +{ + struct io_kiocb *req = wait->private; + struct io_poll_iocb *poll = &req->poll; + + return __io_async_wake(req, poll, (( __poll_t)(uintptr_t)(void *)(key)), io_poll_task_func); +} + +static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, + struct poll_table_struct *p) +{ + struct io_poll_table *pt = ({ void *__mptr = (void *)(p); do { extern void __compiletime_assert_1861(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(p)), typeof(((struct io_poll_table *)0)->pt)) && !__builtin_types_compatible_p(typeof(*(p)), typeof(void))))) __compiletime_assert_1861(); } while (0); ((struct io_poll_table *)(__mptr - __builtin_offsetof(struct io_poll_table, pt))); }); + + __io_queue_proc(&pt->req->poll, pt, head); +} + +static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_poll_iocb *poll = 
&req->poll; + u32 events; + + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) + return -22; + if (!poll->file) + return -9; + + events = ({ do { extern void __compiletime_assert_1862(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->poll32_events) == sizeof(char) || sizeof(sqe->poll32_events) == sizeof(short) || sizeof(sqe->poll32_events) == sizeof(int) || sizeof(sqe->poll32_events) == sizeof(long)) || sizeof(sqe->poll32_events) == sizeof(long long))) __compiletime_assert_1862(); } while (0); ({ typeof( _Generic((sqe->poll32_events), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->poll32_events))) __x = (*(const volatile typeof( _Generic((sqe->poll32_events), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->poll32_events))) *)&(sqe->poll32_events)); do { } while (0); (typeof(sqe->poll32_events))__x; }); }); + + + + poll->events = demangle_poll(events) | ( __poll_t)0x00000008 | ( __poll_t)0x00000010 | + (events & (( __poll_t)(1U << 28))); + + io_get_req_task(req); + return 0; +} + +static int io_poll_add(struct io_kiocb *req) +{ + struct io_poll_iocb *poll = &req->poll; + struct io_ring_ctx *ctx = req->ctx; + struct io_poll_table ipt; + __poll_t mask; + + INIT_HLIST_NODE(&req->hash_node); + INIT_LIST_HEAD(&req->list); + ipt.pt._qproc = io_poll_queue_proc; + + mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, + io_poll_wake); + + if (mask) { + ipt.error = 0; + io_poll_complete(req, mask, 0); + } + spin_unlock_irq(&ctx->completion_lock); + + if (mask) { + io_cqring_ev_posted(ctx); + io_put_req(req); + } + return ipt.error; +} + +static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) +{ + struct io_timeout_data *data = ({ void *__mptr = (void *)(timer); do { extern void __compiletime_assert_1863(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(timer)), typeof(((struct io_timeout_data *)0)->timer)) && !__builtin_types_compatible_p(typeof(*(timer)), typeof(void))))) __compiletime_assert_1863(); } while (0); ((struct io_timeout_data *)(__mptr - __builtin_offsetof(struct io_timeout_data, timer))); }) + ; + struct io_kiocb *req = data->req; + struct io_ring_ctx *ctx = req->ctx; + unsigned long flags; + + atomic_inc(&ctx->cq_timeouts); + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->completion_lock)); } while (0); } while (0); + + + + + if (!list_empty(&req->list)) + list_del_init(&req->list); + + io_cqring_fill_event(req, -62); + io_commit_cqring(ctx); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + + io_cqring_ev_posted(ctx); + req_set_fail_links(req); + io_put_req(req); + return HRTIMER_NORESTART; +} + +static int 
io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) +{ + struct io_kiocb *req; + int ret = -2; + + for (req = ({ void *__mptr = (void *)((&ctx->timeout_list)->next); do { extern void __compiletime_assert_1864(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ctx->timeout_list)->next)), typeof(((typeof(*req) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&ctx->timeout_list)->next)), typeof(void))))) __compiletime_assert_1864(); } while (0); ((typeof(*req) *)(__mptr - __builtin_offsetof(typeof(*req), list))); }); &req->list != (&ctx->timeout_list); req = ({ void *__mptr = (void *)((req)->list.next); do { extern void __compiletime_assert_1865(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((req)->list.next)), typeof(((typeof(*(req)) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((req)->list.next)), typeof(void))))) __compiletime_assert_1865(); } while (0); ((typeof(*(req)) *)(__mptr - __builtin_offsetof(typeof(*(req)), list))); })) { + if (user_data == req->user_data) { + list_del_init(&req->list); + ret = 0; + break; + } + } + + if (ret == -2) + return ret; + + ret = hrtimer_try_to_cancel(&req->io->timeout.timer); + if (ret == -1) + return -114; + + req_set_fail_links(req); + io_cqring_fill_event(req, -125); + io_put_req(req); + return 0; +} + +static int io_timeout_remove_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) + return -22; + + req->timeout.addr = ({ do { extern void __compiletime_assert_1866(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1866(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }); + req->timeout.flags = ({ do { extern void __compiletime_assert_1867(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->timeout_flags) == sizeof(char) || sizeof(sqe->timeout_flags) == sizeof(short) || sizeof(sqe->timeout_flags) == sizeof(int) || sizeof(sqe->timeout_flags) == sizeof(long)) || sizeof(sqe->timeout_flags) == sizeof(long long))) __compiletime_assert_1867(); } while (0); ({ typeof( _Generic((sqe->timeout_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned 
short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->timeout_flags))) __x = (*(const volatile typeof( _Generic((sqe->timeout_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->timeout_flags))) *)&(sqe->timeout_flags)); do { } while (0); (typeof(sqe->timeout_flags))__x; }); }); + if (req->timeout.flags) + return -22; + + return 0; +} + + + + +static int io_timeout_remove(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + spin_lock_irq(&ctx->completion_lock); + ret = io_timeout_cancel(ctx, req->timeout.addr); + + io_cqring_fill_event(req, ret); + io_commit_cqring(ctx); + spin_unlock_irq(&ctx->completion_lock); + io_cqring_ev_posted(ctx); + if (ret < 0) + req_set_fail_links(req); + io_put_req(req); + return 0; +} + +static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, + bool is_timeout_link) +{ + struct io_timeout_data *data; + unsigned flags; + u32 off = ({ do { extern void __compiletime_assert_1868(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->off) == sizeof(char) || sizeof(sqe->off) == sizeof(short) || sizeof(sqe->off) == sizeof(int) || sizeof(sqe->off) == sizeof(long)) || sizeof(sqe->off) == sizeof(long long))) __compiletime_assert_1868(); } while (0); ({ typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) __x = (*(const volatile typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) *)&(sqe->off)); do { } while (0); (typeof(sqe->off))__x; }); }); + + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + if (sqe->ioprio || sqe->buf_index || sqe->len != 1) + return -22; + if (off && is_timeout_link) + return -22; + flags = ({ do { extern void __compiletime_assert_1869(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->timeout_flags) == sizeof(char) || sizeof(sqe->timeout_flags) == sizeof(short) || sizeof(sqe->timeout_flags) == sizeof(int) || sizeof(sqe->timeout_flags) == sizeof(long)) || sizeof(sqe->timeout_flags) == sizeof(long long))) __compiletime_assert_1869(); } while (0); ({ typeof( _Generic((sqe->timeout_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, 
unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->timeout_flags))) __x = (*(const volatile typeof( _Generic((sqe->timeout_flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->timeout_flags))) *)&(sqe->timeout_flags)); do { } while (0); (typeof(sqe->timeout_flags))__x; }); }); + if (flags & ~(1U << 0)) + return -22; + + req->timeout.off = off; + + if (!req->io && io_alloc_async_ctx(req)) + return -12; + + data = &req->io->timeout; + data->req = req; + + if (get_timespec64(&data->ts, ( { ({ u64 __dummy; typeof((sqe->addr)) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(sqe->addr); } ))) + return -14; + + if (flags & (1U << 0)) + data->mode = HRTIMER_MODE_ABS; + else + data->mode = HRTIMER_MODE_REL; + + hrtimer_init(&data->timer, 1, data->mode); + return 0; +} + +static int io_timeout(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_timeout_data *data = &req->io->timeout; + struct list_head *entry; + u32 tail, off = req->timeout.off; + + spin_lock_irq(&ctx->completion_lock); + + + + + + + if (io_is_timeout_noseq(req)) { + entry = ctx->timeout_list.prev; + goto add; + } + + tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); + req->timeout.target_seq = tail + off; + + + + + + for (entry = (&ctx->timeout_list)->prev; entry != (&ctx->timeout_list); entry = entry->prev) { + struct io_kiocb *nxt = ({ void *__mptr = (void *)(entry); do { extern void __compiletime_assert_1870(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(entry)), typeof(((struct io_kiocb *)0)->list)) && !__builtin_types_compatible_p(typeof(*(entry)), typeof(void))))) __compiletime_assert_1870(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, list))); }); + + if (io_is_timeout_noseq(nxt)) + continue; + + if (off >= nxt->timeout.target_seq - tail) + break; + } +add: + list_add(&req->list, entry); + data->timer.function = io_timeout_fn; + hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); + spin_unlock_irq(&ctx->completion_lock); + return 0; +} + +static bool io_cancel_cb(struct io_wq_work *work, void *data) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_1871(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct io_kiocb *)0)->work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_1871(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, work))); }); + + return req->user_data == (unsigned long) data; +} + +static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) +{ + enum io_wq_cancel cancel_ret; + int ret = 0; + + cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false); + switch (cancel_ret) { + case IO_WQ_CANCEL_OK: + ret = 0; + break; + case IO_WQ_CANCEL_RUNNING: + ret = -114; + break; + case IO_WQ_CANCEL_NOTFOUND: + ret = -2; + break; + } + + return ret; +} + +static void 
io_async_find_and_cancel(struct io_ring_ctx *ctx, + struct io_kiocb *req, __u64 sqe_addr, + int success_ret) +{ + unsigned long flags; + int ret; + + ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr); + if (ret != -2) { + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->completion_lock)); } while (0); } while (0); + goto done; + } + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->completion_lock)); } while (0); } while (0); + ret = io_timeout_cancel(ctx, sqe_addr); + if (ret != -2) + goto done; + ret = io_poll_cancel(ctx, sqe_addr); +done: + if (!ret) + ret = success_ret; + io_cqring_fill_event(req, ret); + io_commit_cqring(ctx); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + io_cqring_ev_posted(ctx); + + if (ret < 0) + req_set_fail_links(req); + io_put_req(req); +} + +static int io_async_cancel_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (__builtin_expect(!!(req->ctx->flags & (1U << 0)), 0)) + return -22; + if (sqe->flags || sqe->ioprio || sqe->off || sqe->len || + sqe->cancel_flags) + return -22; + + req->cancel.addr = ({ do { extern void __compiletime_assert_1872(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1872(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }); + return 0; +} + +static int io_async_cancel(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + io_async_find_and_cancel(ctx, req, req->cancel.addr, 0); + return 0; +} + +static int io_files_update_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + if (sqe->flags || sqe->ioprio || sqe->rw_flags) + return -22; + + req->files_update.offset = ({ do { extern void __compiletime_assert_1873(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->off) == sizeof(char) || sizeof(sqe->off) == sizeof(short) || sizeof(sqe->off) == sizeof(int) || sizeof(sqe->off) == sizeof(long)) || sizeof(sqe->off) == sizeof(long long))) __compiletime_assert_1873(); } while (0); ({ typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned 
long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) __x = (*(const volatile typeof( _Generic((sqe->off), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->off))) *)&(sqe->off)); do { } while (0); (typeof(sqe->off))__x; }); }); + req->files_update.nr_args = ({ do { extern void __compiletime_assert_1874(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->len) == sizeof(char) || sizeof(sqe->len) == sizeof(short) || sizeof(sqe->len) == sizeof(int) || sizeof(sqe->len) == sizeof(long)) || sizeof(sqe->len) == sizeof(long long))) __compiletime_assert_1874(); } while (0); ({ typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) __x = (*(const volatile typeof( _Generic((sqe->len), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->len))) *)&(sqe->len)); do { } while (0); (typeof(sqe->len))__x; }); }); + if (!req->files_update.nr_args) + return -22; + req->files_update.arg = ({ do { extern void __compiletime_assert_1875(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->addr) == sizeof(char) || sizeof(sqe->addr) == sizeof(short) || sizeof(sqe->addr) == sizeof(int) || sizeof(sqe->addr) == sizeof(long)) || sizeof(sqe->addr) == sizeof(long long))) __compiletime_assert_1875(); } while (0); ({ typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) __x = (*(const volatile typeof( _Generic((sqe->addr), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->addr))) *)&(sqe->addr)); do { } while (0); (typeof(sqe->addr))__x; }); }); + return 0; +} + +static int io_files_update(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_uring_files_update up; + int ret; + + if (force_nonblock) + return -11; + + up.offset = req->files_update.offset; + up.fds = 
req->files_update.arg; + + mutex_lock_nested(&ctx->uring_lock, 0); + ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args); + mutex_unlock(&ctx->uring_lock); + + if (ret < 0) + req_set_fail_links(req); + __io_req_complete(req, ret, 0, cs); + return 0; +} + +static int io_req_defer_prep(struct io_kiocb *req, + const struct io_uring_sqe *sqe) +{ + ssize_t ret = 0; + + if (!sqe) + return 0; + + if (io_op_defs[req->opcode].file_table) { + io_req_init_async(req); + ret = io_grab_files(req); + if (__builtin_expect(!!(ret), 0)) + return ret; + } + + switch (req->opcode) { + case IORING_OP_NOP: + break; + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_READ: + ret = io_read_prep(req, sqe, true); + break; + case IORING_OP_WRITEV: + case IORING_OP_WRITE_FIXED: + case IORING_OP_WRITE: + ret = io_write_prep(req, sqe, true); + break; + case IORING_OP_POLL_ADD: + ret = io_poll_add_prep(req, sqe); + break; + case IORING_OP_POLL_REMOVE: + ret = io_poll_remove_prep(req, sqe); + break; + case IORING_OP_FSYNC: + ret = io_prep_fsync(req, sqe); + break; + case IORING_OP_SYNC_FILE_RANGE: + ret = io_prep_sfr(req, sqe); + break; + case IORING_OP_SENDMSG: + case IORING_OP_SEND: + ret = io_sendmsg_prep(req, sqe); + break; + case IORING_OP_RECVMSG: + case IORING_OP_RECV: + ret = io_recvmsg_prep(req, sqe); + break; + case IORING_OP_CONNECT: + ret = io_connect_prep(req, sqe); + break; + case IORING_OP_TIMEOUT: + ret = io_timeout_prep(req, sqe, false); + break; + case IORING_OP_TIMEOUT_REMOVE: + ret = io_timeout_remove_prep(req, sqe); + break; + case IORING_OP_ASYNC_CANCEL: + ret = io_async_cancel_prep(req, sqe); + break; + case IORING_OP_LINK_TIMEOUT: + ret = io_timeout_prep(req, sqe, true); + break; + case IORING_OP_ACCEPT: + ret = io_accept_prep(req, sqe); + break; + case IORING_OP_FALLOCATE: + ret = io_fallocate_prep(req, sqe); + break; + case IORING_OP_OPENAT: + ret = io_openat_prep(req, sqe); + break; + case IORING_OP_CLOSE: + ret = io_close_prep(req, sqe); + break; + case IORING_OP_FILES_UPDATE: + ret = io_files_update_prep(req, sqe); + break; + case IORING_OP_STATX: + ret = io_statx_prep(req, sqe); + break; + case IORING_OP_FADVISE: + ret = io_fadvise_prep(req, sqe); + break; + case IORING_OP_MADVISE: + ret = io_madvise_prep(req, sqe); + break; + case IORING_OP_OPENAT2: + ret = io_openat2_prep(req, sqe); + break; + case IORING_OP_EPOLL_CTL: + ret = io_epoll_ctl_prep(req, sqe); + break; + case IORING_OP_SPLICE: + ret = io_splice_prep(req, sqe); + break; + case IORING_OP_PROVIDE_BUFFERS: + ret = io_provide_buffers_prep(req, sqe); + break; + case IORING_OP_REMOVE_BUFFERS: + ret = io_remove_buffers_prep(req, sqe); + break; + case IORING_OP_TEE: + ret = io_tee_prep(req, sqe); + break; + default: + ({ static bool __attribute__((__section__(".data.once"))) __print_once; bool __ret_print_once = !__print_once; if (!__print_once) { __print_once = true; printk("\001" "4" "io_uring: unhandled opcode %d\n", req->opcode); } __builtin_expect(!!(__ret_print_once), 0); }) + ; + ret = -22; + break; + } + + return ret; +} + +static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + + if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list)) + return 0; + + if (!req->io) { + if (io_alloc_async_ctx(req)) + return -11; + ret = io_req_defer_prep(req, sqe); + if (ret < 0) + return ret; + } + io_prep_async_link(req); + + spin_lock_irq(&ctx->completion_lock); + if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { + 
spin_unlock_irq(&ctx->completion_lock); + return 0; + } + + trace_io_uring_defer(ctx, req, req->user_data); + list_add_tail(&req->list, &ctx->defer_list); + spin_unlock_irq(&ctx->completion_lock); + return -529; +} + +static void io_cleanup_req(struct io_kiocb *req) +{ + struct io_async_ctx *io = req->io; + + switch (req->opcode) { + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_READ: + if (req->flags & REQ_F_BUFFER_SELECTED) + kfree((void *)(unsigned long)req->rw.addr); + + case IORING_OP_WRITEV: + case IORING_OP_WRITE_FIXED: + case IORING_OP_WRITE: + if (io->rw.iov != io->rw.fast_iov) + kfree(io->rw.iov); + break; + case IORING_OP_RECVMSG: + if (req->flags & REQ_F_BUFFER_SELECTED) + kfree(req->sr_msg.kbuf); + + case IORING_OP_SENDMSG: + if (io->msg.iov != io->msg.fast_iov) + kfree(io->msg.iov); + break; + case IORING_OP_RECV: + if (req->flags & REQ_F_BUFFER_SELECTED) + kfree(req->sr_msg.kbuf); + break; + case IORING_OP_OPENAT: + case IORING_OP_OPENAT2: + break; + case IORING_OP_SPLICE: + case IORING_OP_TEE: + io_put_file(req, req->splice.file_in, + (req->splice.flags & (1U << 31))); + break; + } + + req->flags &= ~REQ_F_NEED_CLEANUP; +} + +static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, + bool force_nonblock, struct io_comp_state *cs) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; + + switch (req->opcode) { + case IORING_OP_NOP: + ret = io_nop(req, cs); + break; + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_READ: + if (sqe) { + ret = io_read_prep(req, sqe, force_nonblock); + if (ret < 0) + break; + } + ret = io_read(req, force_nonblock, cs); + break; + case IORING_OP_WRITEV: + case IORING_OP_WRITE_FIXED: + case IORING_OP_WRITE: + if (sqe) { + ret = io_write_prep(req, sqe, force_nonblock); + if (ret < 0) + break; + } + ret = io_write(req, force_nonblock, cs); + break; + case IORING_OP_FSYNC: + if (sqe) { + ret = io_prep_fsync(req, sqe); + if (ret < 0) + break; + } + ret = io_fsync(req, force_nonblock); + break; + case IORING_OP_POLL_ADD: + if (sqe) { + ret = io_poll_add_prep(req, sqe); + if (ret) + break; + } + ret = io_poll_add(req); + break; + case IORING_OP_POLL_REMOVE: + if (sqe) { + ret = io_poll_remove_prep(req, sqe); + if (ret < 0) + break; + } + ret = io_poll_remove(req); + break; + case IORING_OP_SYNC_FILE_RANGE: + if (sqe) { + ret = io_prep_sfr(req, sqe); + if (ret < 0) + break; + } + ret = io_sync_file_range(req, force_nonblock); + break; + case IORING_OP_SENDMSG: + case IORING_OP_SEND: + if (sqe) { + ret = io_sendmsg_prep(req, sqe); + if (ret < 0) + break; + } + if (req->opcode == IORING_OP_SENDMSG) + ret = io_sendmsg(req, force_nonblock, cs); + else + ret = io_send(req, force_nonblock, cs); + break; + case IORING_OP_RECVMSG: + case IORING_OP_RECV: + if (sqe) { + ret = io_recvmsg_prep(req, sqe); + if (ret) + break; + } + if (req->opcode == IORING_OP_RECVMSG) + ret = io_recvmsg(req, force_nonblock, cs); + else + ret = io_recv(req, force_nonblock, cs); + break; + case IORING_OP_TIMEOUT: + if (sqe) { + ret = io_timeout_prep(req, sqe, false); + if (ret) + break; + } + ret = io_timeout(req); + break; + case IORING_OP_TIMEOUT_REMOVE: + if (sqe) { + ret = io_timeout_remove_prep(req, sqe); + if (ret) + break; + } + ret = io_timeout_remove(req); + break; + case IORING_OP_ACCEPT: + if (sqe) { + ret = io_accept_prep(req, sqe); + if (ret) + break; + } + ret = io_accept(req, force_nonblock, cs); + break; + case IORING_OP_CONNECT: + if (sqe) { + ret = io_connect_prep(req, sqe); + if (ret) + break; + } + ret = 
io_connect(req, force_nonblock, cs); + break; + case IORING_OP_ASYNC_CANCEL: + if (sqe) { + ret = io_async_cancel_prep(req, sqe); + if (ret) + break; + } + ret = io_async_cancel(req); + break; + case IORING_OP_FALLOCATE: + if (sqe) { + ret = io_fallocate_prep(req, sqe); + if (ret) + break; + } + ret = io_fallocate(req, force_nonblock); + break; + case IORING_OP_OPENAT: + if (sqe) { + ret = io_openat_prep(req, sqe); + if (ret) + break; + } + ret = io_openat(req, force_nonblock); + break; + case IORING_OP_CLOSE: + if (sqe) { + ret = io_close_prep(req, sqe); + if (ret) + break; + } + ret = io_close(req, force_nonblock, cs); + break; + case IORING_OP_FILES_UPDATE: + if (sqe) { + ret = io_files_update_prep(req, sqe); + if (ret) + break; + } + ret = io_files_update(req, force_nonblock, cs); + break; + case IORING_OP_STATX: + if (sqe) { + ret = io_statx_prep(req, sqe); + if (ret) + break; + } + ret = io_statx(req, force_nonblock); + break; + case IORING_OP_FADVISE: + if (sqe) { + ret = io_fadvise_prep(req, sqe); + if (ret) + break; + } + ret = io_fadvise(req, force_nonblock); + break; + case IORING_OP_MADVISE: + if (sqe) { + ret = io_madvise_prep(req, sqe); + if (ret) + break; + } + ret = io_madvise(req, force_nonblock); + break; + case IORING_OP_OPENAT2: + if (sqe) { + ret = io_openat2_prep(req, sqe); + if (ret) + break; + } + ret = io_openat2(req, force_nonblock); + break; + case IORING_OP_EPOLL_CTL: + if (sqe) { + ret = io_epoll_ctl_prep(req, sqe); + if (ret) + break; + } + ret = io_epoll_ctl(req, force_nonblock, cs); + break; + case IORING_OP_SPLICE: + if (sqe) { + ret = io_splice_prep(req, sqe); + if (ret < 0) + break; + } + ret = io_splice(req, force_nonblock); + break; + case IORING_OP_PROVIDE_BUFFERS: + if (sqe) { + ret = io_provide_buffers_prep(req, sqe); + if (ret) + break; + } + ret = io_provide_buffers(req, force_nonblock, cs); + break; + case IORING_OP_REMOVE_BUFFERS: + if (sqe) { + ret = io_remove_buffers_prep(req, sqe); + if (ret) + break; + } + ret = io_remove_buffers(req, force_nonblock, cs); + break; + case IORING_OP_TEE: + if (sqe) { + ret = io_tee_prep(req, sqe); + if (ret < 0) + break; + } + ret = io_tee(req, force_nonblock); + break; + default: + ret = -22; + break; + } + + if (ret) + return ret; + + + if ((ctx->flags & (1U << 0)) && req->file) { + const bool in_async = io_wq_current_is_worker(); + + + if (in_async) + mutex_lock_nested(&ctx->uring_lock, 0); + + io_iopoll_req_issued(req); + + if (in_async) + mutex_unlock(&ctx->uring_lock); + } + + return 0; +} + +static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_1876(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct io_kiocb *)0)->work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_1876(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, work))); }); + struct io_kiocb *timeout; + int ret = 0; + + timeout = io_prep_linked_timeout(req); + if (timeout) + io_queue_linked_timeout(timeout); + + + if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) == + IO_WQ_WORK_CANCEL) { + ret = -125; + } + + if (!ret) { + do { + ret = io_issue_sqe(req, ((void *)0), false, ((void *)0)); + + + + + + if (ret != -11) + break; + ({ ___might_sleep("fs/io_uring.c", 5731, 0); _cond_resched(); }); + } while (1); + } + + if (ret) { + req_set_fail_links(req); + 
io_req_complete(req, ret); + } + + return io_steal_work(req); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) struct file *io_file_from_index(struct io_ring_ctx *ctx, + int index) +{ + struct fixed_file_table *table; + + table = &ctx->file_data->table[index >> 9]; + return table->files[index & ((1U << 9) - 1)]; +} + +static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, + int fd, struct file **out_file, bool fixed) +{ + struct io_ring_ctx *ctx = req->ctx; + struct file *file; + + if (fixed) { + if (__builtin_expect(!!(!ctx->file_data || (unsigned) fd >= ctx->nr_user_files), 0) + ) + return -9; + fd = ({ typeof(fd) _i = (fd); typeof(ctx->nr_user_files) _s = (ctx->nr_user_files); unsigned long _mask = array_index_mask_nospec(_i, _s); do { extern void __compiletime_assert_1877(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(_i) > sizeof(long)"))); if (!(!(sizeof(_i) > sizeof(long)))) __compiletime_assert_1877(); } while (0); do { extern void __compiletime_assert_1878(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(_s) > sizeof(long)"))); if (!(!(sizeof(_s) > sizeof(long)))) __compiletime_assert_1878(); } while (0); (typeof(_i)) (_i & _mask); }); + file = io_file_from_index(ctx, fd); + if (file) { + req->fixed_file_refs = ctx->file_data->cur_refs; + percpu_ref_get(req->fixed_file_refs); + } + } else { + trace_io_uring_file_get(ctx, fd); + file = __io_file_get(state, fd); + } + + if (file || io_op_defs[req->opcode].needs_file_no_error) { + *out_file = file; + return 0; + } + return -9; +} + +static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, + int fd) +{ + bool fixed; + + fixed = (req->flags & REQ_F_FIXED_FILE) != 0; + if (__builtin_expect(!!(!fixed && io_async_submit(req->ctx)), 0)) + return -9; + + return io_file_get(state, req, fd, &req->file, fixed); +} + +static int io_grab_files(struct io_kiocb *req) +{ + int ret = -9; + struct io_ring_ctx *ctx = req->ctx; + + if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE)) + return 0; + if (!ctx->ring_file) + return -9; + + rcu_read_lock(); + spin_lock_irq(&ctx->inflight_lock); + + + + + + + if (fcheck_files(get_current()->files, ctx->ring_fd) == ctx->ring_file) { + list_add(&req->inflight_entry, &ctx->inflight_list); + req->flags |= REQ_F_INFLIGHT; + req->work.files = get_current()->files; + ret = 0; + } + spin_unlock_irq(&ctx->inflight_lock); + rcu_read_unlock(); + + return ret; +} + +static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) +{ + struct io_timeout_data *data = ({ void *__mptr = (void *)(timer); do { extern void __compiletime_assert_1879(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(timer)), typeof(((struct io_timeout_data *)0)->timer)) && !__builtin_types_compatible_p(typeof(*(timer)), typeof(void))))) __compiletime_assert_1879(); } while (0); ((struct io_timeout_data *)(__mptr - __builtin_offsetof(struct io_timeout_data, timer))); }) + ; + struct io_kiocb *req = data->req; + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *prev = ((void *)0); + unsigned long flags; + + do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&ctx->completion_lock)); } while (0); } while (0); + + + + + + if (!list_empty(&req->link_list)) { + prev = ({ void *__mptr = (void *)(req->link_list.prev); do { extern 
void __compiletime_assert_1880(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(req->link_list.prev)), typeof(((struct io_kiocb *)0)->link_list)) && !__builtin_types_compatible_p(typeof(*(req->link_list.prev)), typeof(void))))) __compiletime_assert_1880(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, link_list))); }) + ; + if (refcount_inc_not_zero(&prev->refs)) { + list_del_init(&req->link_list); + prev->flags &= ~REQ_F_LINK_TIMEOUT; + } else + prev = ((void *)0); + } + + spin_unlock_irqrestore(&ctx->completion_lock, flags); + + if (prev) { + req_set_fail_links(prev); + io_async_find_and_cancel(ctx, req, prev->user_data, -62); + io_put_req(prev); + } else { + io_req_complete(req, -62); + } + return HRTIMER_NORESTART; +} + +static void io_queue_linked_timeout(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + + + + + spin_lock_irq(&ctx->completion_lock); + if (!list_empty(&req->link_list)) { + struct io_timeout_data *data = &req->io->timeout; + + data->timer.function = io_link_timeout_fn; + hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), + data->mode); + } + spin_unlock_irq(&ctx->completion_lock); + + + io_put_req(req); +} + +static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) +{ + struct io_kiocb *nxt; + + if (!(req->flags & REQ_F_LINK_HEAD)) + return ((void *)0); + if (req->flags & REQ_F_LINK_TIMEOUT) + return ((void *)0); + + nxt = ({ struct list_head *head__ = (&req->link_list); struct list_head *pos__ = ({ do { extern void __compiletime_assert_1881(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(head__->next) == sizeof(char) || sizeof(head__->next) == sizeof(short) || sizeof(head__->next) == sizeof(int) || sizeof(head__->next) == sizeof(long)) || sizeof(head__->next) == sizeof(long long))) __compiletime_assert_1881(); } while (0); ({ typeof( _Generic((head__->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head__->next))) __x = (*(const volatile typeof( _Generic((head__->next), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (head__->next))) *)&(head__->next)); do { } while (0); (typeof(head__->next))__x; }); }); pos__ != head__ ? 
({ void *__mptr = (void *)(pos__); do { extern void __compiletime_assert_1882(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(pos__)), typeof(((struct io_kiocb *)0)->link_list)) && !__builtin_types_compatible_p(typeof(*(pos__)), typeof(void))))) __compiletime_assert_1882(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, link_list))); }) : ((void *)0); }) + ; + if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT) + return ((void *)0); + + req->flags |= REQ_F_LINK_TIMEOUT; + return nxt; +} + +static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_comp_state *cs) +{ + struct io_kiocb *linked_timeout; + struct io_kiocb *nxt; + const struct cred *old_creds = ((void *)0); + int ret; + +again: + linked_timeout = io_prep_linked_timeout(req); + + if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds && + req->work.creds != ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("fs/io_uring.c", 5911, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })) { + if (old_creds) + revert_creds(old_creds); + if (old_creds == req->work.creds) + old_creds = ((void *)0); + else + old_creds = override_creds(req->work.creds); + } + + ret = io_issue_sqe(req, sqe, true, cs); + + + + + + if (ret == -11 && !(req->flags & REQ_F_NOWAIT)) { + if (io_arm_poll_handler(req)) { + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); + goto exit; + } +punt: + io_req_init_async(req); + + if (io_op_defs[req->opcode].file_table) { + ret = io_grab_files(req); + if (ret) + goto err; + } + + + + + + io_queue_async_work(req); + goto exit; + } + + if (__builtin_expect(!!(ret), 0)) { +err: + + req->flags &= ~REQ_F_LINK_TIMEOUT; + req_set_fail_links(req); + io_put_req(req); + io_req_complete(req, ret); + goto exit; + } + + + nxt = io_put_req_find_next(req); + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); + + if (nxt) { + req = nxt; + + if (req->flags & REQ_F_FORCE_ASYNC) + goto punt; + goto again; + } +exit: + if (old_creds) + revert_creds(old_creds); +} + +static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_comp_state *cs) +{ + int ret; + + ret = io_req_defer(req, sqe); + if (ret) { + if (ret != -529) { +fail_req: + req_set_fail_links(req); + io_put_req(req); + io_req_complete(req, ret); + } + } else if (req->flags & REQ_F_FORCE_ASYNC) { + if (!req->io) { + ret = -11; + if (io_alloc_async_ctx(req)) + goto fail_req; + ret = io_req_defer_prep(req, sqe); + if (__builtin_expect(!!(ret < 0), 0)) + goto fail_req; + } + + + + + + req->work.flags |= IO_WQ_WORK_CONCURRENT; + io_queue_async_work(req); + } else { + __io_queue_sqe(req, sqe, cs); + } +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void io_queue_link_head(struct io_kiocb *req, + struct io_comp_state *cs) +{ + if (__builtin_expect(!!(req->flags & REQ_F_FAIL_LINK), 0)) { + io_put_req(req); + io_req_complete(req, -125); + } else + io_queue_sqe(req, ((void *)0), cs); +} + +static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_kiocb **link, struct io_comp_state *cs) +{ + struct io_ring_ctx *ctx = req->ctx; + int ret; +# 6033 "fs/io_uring.c" + if (*link) { + struct 
io_kiocb *head = *link; +# 6043 "fs/io_uring.c" + if (req->flags & REQ_F_IO_DRAIN) { + head->flags |= REQ_F_IO_DRAIN; + ctx->drain_next = 1; + } + if (io_alloc_async_ctx(req)) + return -11; + + ret = io_req_defer_prep(req, sqe); + if (ret) { + + head->flags |= REQ_F_FAIL_LINK; + return ret; + } + trace_io_uring_link(ctx, req, head); + io_get_req_task(req); + list_add_tail(&req->link_list, &head->link_list); + + + if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { + io_queue_link_head(head, cs); + *link = ((void *)0); + } + } else { + if (__builtin_expect(!!(ctx->drain_next), 0)) { + req->flags |= REQ_F_IO_DRAIN; + ctx->drain_next = 0; + } + if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { + req->flags |= REQ_F_LINK_HEAD; + INIT_LIST_HEAD(&req->link_list); + + if (io_alloc_async_ctx(req)) + return -11; + + ret = io_req_defer_prep(req, sqe); + if (ret) + req->flags |= REQ_F_FAIL_LINK; + *link = req; + } else { + io_queue_sqe(req, sqe, cs); + } + } + + return 0; +} + + + + +static void io_submit_state_end(struct io_submit_state *state) +{ + if (!list_empty(&state->comp.list)) + io_submit_flush_completions(&state->comp); + blk_finish_plug(&state->plug); + io_state_file_put(state); + if (state->free_reqs) + kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); +} + + + + +static void io_submit_state_start(struct io_submit_state *state, + struct io_ring_ctx *ctx, unsigned int max_ios) +{ + blk_start_plug(&state->plug); + + state->plug.nowait = true; + + state->comp.nr = 0; + INIT_LIST_HEAD(&state->comp.list); + state->comp.ctx = ctx; + state->free_reqs = 0; + state->file = ((void *)0); + state->ios_left = max_ios; +} + +static void io_commit_sqring(struct io_ring_ctx *ctx) +{ + struct io_rings *rings = ctx->rings; + + + + + + + do { do { extern void __compiletime_assert_1883(void) __attribute__((__error__("Need native word sized stores/loads for atomicity."))); if (!((sizeof(*&rings->sq.head) == sizeof(char) || sizeof(*&rings->sq.head) == sizeof(short) || sizeof(*&rings->sq.head) == sizeof(int) || sizeof(*&rings->sq.head) == sizeof(long)))) __compiletime_assert_1883(); } while (0); __asm__ __volatile__("": : :"memory"); do { do { extern void __compiletime_assert_1884(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(*&rings->sq.head) == sizeof(char) || sizeof(*&rings->sq.head) == sizeof(short) || sizeof(*&rings->sq.head) == sizeof(int) || sizeof(*&rings->sq.head) == sizeof(long)) || sizeof(*&rings->sq.head) == sizeof(long long))) __compiletime_assert_1884(); } while (0); do { *(volatile typeof(*&rings->sq.head) *)&(*&rings->sq.head) = (ctx->cached_sq_head); } while (0); } while (0); } while (0); +} +# 6140 "fs/io_uring.c" +static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) +{ + u32 *sq_array = ctx->sq_array; + unsigned head; +# 6153 "fs/io_uring.c" + head = ({ do { extern void __compiletime_assert_1885(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sq_array[ctx->cached_sq_head & ctx->sq_mask]) == sizeof(char) || sizeof(sq_array[ctx->cached_sq_head & ctx->sq_mask]) == sizeof(short) || sizeof(sq_array[ctx->cached_sq_head & ctx->sq_mask]) == sizeof(int) || sizeof(sq_array[ctx->cached_sq_head & ctx->sq_mask]) == sizeof(long)) || sizeof(sq_array[ctx->cached_sq_head & ctx->sq_mask]) == sizeof(long long))) __compiletime_assert_1885(); } while (0); ({ typeof( _Generic((sq_array[ctx->cached_sq_head & ctx->sq_mask]), char: (char)0, unsigned char: (unsigned char)0, signed 
char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sq_array[ctx->cached_sq_head & ctx->sq_mask]))) __x = (*(const volatile typeof( _Generic((sq_array[ctx->cached_sq_head & ctx->sq_mask]), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sq_array[ctx->cached_sq_head & ctx->sq_mask]))) *)&(sq_array[ctx->cached_sq_head & ctx->sq_mask])); do { } while (0); (typeof(sq_array[ctx->cached_sq_head & ctx->sq_mask]))__x; }); }); + if (__builtin_expect(!!(head < ctx->sq_entries), 1)) + return &ctx->sq_sqes[head]; + + + ctx->cached_sq_dropped++; + do { do { extern void __compiletime_assert_1886(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ctx->rings->sq_dropped) == sizeof(char) || sizeof(ctx->rings->sq_dropped) == sizeof(short) || sizeof(ctx->rings->sq_dropped) == sizeof(int) || sizeof(ctx->rings->sq_dropped) == sizeof(long)) || sizeof(ctx->rings->sq_dropped) == sizeof(long long))) __compiletime_assert_1886(); } while (0); do { *(volatile typeof(ctx->rings->sq_dropped) *)&(ctx->rings->sq_dropped) = (ctx->cached_sq_dropped); } while (0); } while (0); + return ((void *)0); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void io_consume_sqe(struct io_ring_ctx *ctx) +{ + ctx->cached_sq_head++; +} + + + + + +static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, + const struct io_uring_sqe *sqe, + struct io_submit_state *state) +{ + unsigned int sqe_flags; + int id; + + + + + + + req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped; + req->opcode = ({ do { extern void __compiletime_assert_1887(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->opcode) == sizeof(char) || sizeof(sqe->opcode) == sizeof(short) || sizeof(sqe->opcode) == sizeof(int) || sizeof(sqe->opcode) == sizeof(long)) || sizeof(sqe->opcode) == sizeof(long long))) __compiletime_assert_1887(); } while (0); ({ typeof( _Generic((sqe->opcode), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->opcode))) __x = (*(const volatile typeof( _Generic((sqe->opcode), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->opcode))) *)&(sqe->opcode)); do { } while (0); (typeof(sqe->opcode))__x; }); }); + req->user_data = ({ do { extern void __compiletime_assert_1888(void) __attribute__((__error__("Unsupported 
access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->user_data) == sizeof(char) || sizeof(sqe->user_data) == sizeof(short) || sizeof(sqe->user_data) == sizeof(int) || sizeof(sqe->user_data) == sizeof(long)) || sizeof(sqe->user_data) == sizeof(long long))) __compiletime_assert_1888(); } while (0); ({ typeof( _Generic((sqe->user_data), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->user_data))) __x = (*(const volatile typeof( _Generic((sqe->user_data), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->user_data))) *)&(sqe->user_data)); do { } while (0); (typeof(sqe->user_data))__x; }); }); + req->io = ((void *)0); + req->file = ((void *)0); + req->ctx = ctx; + req->flags = 0; + + refcount_set(&req->refs, 2); + req->task = get_current(); + req->result = 0; + + if (__builtin_expect(!!(req->opcode >= IORING_OP_LAST), 0)) + return -22; + + if (__builtin_expect(!!(io_sq_thread_acquire_mm(ctx, req)), 0)) + return -14; + + sqe_flags = ({ do { extern void __compiletime_assert_1889(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->flags) == sizeof(char) || sizeof(sqe->flags) == sizeof(short) || sizeof(sqe->flags) == sizeof(int) || sizeof(sqe->flags) == sizeof(long)) || sizeof(sqe->flags) == sizeof(long long))) __compiletime_assert_1889(); } while (0); ({ typeof( _Generic((sqe->flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->flags))) __x = (*(const volatile typeof( _Generic((sqe->flags), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->flags))) *)&(sqe->flags)); do { } while (0); (typeof(sqe->flags))__x; }); }); + + if (__builtin_expect(!!(sqe_flags & ~((1U << IOSQE_FIXED_FILE_BIT)|(1U << IOSQE_IO_DRAIN_BIT)|(1U << IOSQE_IO_LINK_BIT)| (1U << IOSQE_IO_HARDLINK_BIT) | (1U << IOSQE_ASYNC_BIT) | (1U << IOSQE_BUFFER_SELECT_BIT))), 0)) + return -22; + + if ((sqe_flags & (1U << IOSQE_BUFFER_SELECT_BIT)) && + !io_op_defs[req->opcode].buffer_select) + return -95; + + id = ({ do { extern void __compiletime_assert_1890(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->personality) == sizeof(char) || sizeof(sqe->personality) == sizeof(short) || sizeof(sqe->personality) == sizeof(int) || sizeof(sqe->personality) == sizeof(long)) || sizeof(sqe->personality) == 
sizeof(long long))) __compiletime_assert_1890(); } while (0); ({ typeof( _Generic((sqe->personality), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->personality))) __x = (*(const volatile typeof( _Generic((sqe->personality), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->personality))) *)&(sqe->personality)); do { } while (0); (typeof(sqe->personality))__x; }); }); + if (id) { + io_req_init_async(req); + req->work.creds = idr_find(&ctx->personality_idr, id); + if (__builtin_expect(!!(!req->work.creds), 0)) + return -22; + get_cred(req->work.creds); + } + + + req->flags |= sqe_flags; + + if (!io_op_defs[req->opcode].needs_file) + return 0; + + return io_req_set_file(state, req, ({ do { extern void __compiletime_assert_1891(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(sqe->fd) == sizeof(char) || sizeof(sqe->fd) == sizeof(short) || sizeof(sqe->fd) == sizeof(int) || sizeof(sqe->fd) == sizeof(long)) || sizeof(sqe->fd) == sizeof(long long))) __compiletime_assert_1891(); } while (0); ({ typeof( _Generic((sqe->fd), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fd))) __x = (*(const volatile typeof( _Generic((sqe->fd), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (sqe->fd))) *)&(sqe->fd)); do { } while (0); (typeof(sqe->fd))__x; }); })); +} + +static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, + struct file *ring_file, int ring_fd) +{ + struct io_submit_state state; + struct io_kiocb *link = ((void *)0); + int i, submitted = 0; + + + if (test_bit(0, &ctx->sq_check_overflow)) { + if (!list_empty(&ctx->cq_overflow_list) && + !io_cqring_overflow_flush(ctx, false)) + return -16; + } + + + nr = __builtin_choose_expr(((!!(sizeof((typeof((typeof(nr))__builtin_choose_expr(((!!(sizeof((typeof(nr) *)1 == (typeof(ctx->sq_entries) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(nr) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(ctx->sq_entries) * 0l)) : (int *)8))))), ((nr) < (ctx->sq_entries) ? (nr) : (ctx->sq_entries)), ({ typeof(nr) __UNIQUE_ID___x1892 = (nr); typeof(ctx->sq_entries) __UNIQUE_ID___y1893 = (ctx->sq_entries); ((__UNIQUE_ID___x1892) < (__UNIQUE_ID___y1893) ? 
(__UNIQUE_ID___x1892) : (__UNIQUE_ID___y1893)); }))) *)1 == (typeof(io_sqring_entries(ctx)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((typeof(nr))__builtin_choose_expr(((!!(sizeof((typeof(nr) *)1 == (typeof(ctx->sq_entries) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(nr) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(ctx->sq_entries) * 0l)) : (int *)8))))), ((nr) < (ctx->sq_entries) ? (nr) : (ctx->sq_entries)), ({ typeof(nr) __UNIQUE_ID___x1892 = (nr); typeof(ctx->sq_entries) __UNIQUE_ID___y1893 = (ctx->sq_entries); ((__UNIQUE_ID___x1892) < (__UNIQUE_ID___y1893) ? (__UNIQUE_ID___x1892) : (__UNIQUE_ID___y1893)); }))) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(io_sqring_entries(ctx)) * 0l)) : (int *)8))))), (((typeof(nr))__builtin_choose_expr(((!!(sizeof((typeof(nr) *)1 == (typeof(ctx->sq_entries) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(nr) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(ctx->sq_entries) * 0l)) : (int *)8))))), ((nr) < (ctx->sq_entries) ? (nr) : (ctx->sq_entries)), ({ typeof(nr) __UNIQUE_ID___x1892 = (nr); typeof(ctx->sq_entries) __UNIQUE_ID___y1893 = (ctx->sq_entries); ((__UNIQUE_ID___x1892) < (__UNIQUE_ID___y1893) ? (__UNIQUE_ID___x1892) : (__UNIQUE_ID___y1893)); }))) < (io_sqring_entries(ctx)) ? ((typeof(nr))__builtin_choose_expr(((!!(sizeof((typeof(nr) *)1 == (typeof(ctx->sq_entries) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(nr) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(ctx->sq_entries) * 0l)) : (int *)8))))), ((nr) < (ctx->sq_entries) ? (nr) : (ctx->sq_entries)), ({ typeof(nr) __UNIQUE_ID___x1892 = (nr); typeof(ctx->sq_entries) __UNIQUE_ID___y1893 = (ctx->sq_entries); ((__UNIQUE_ID___x1892) < (__UNIQUE_ID___y1893) ? (__UNIQUE_ID___x1892) : (__UNIQUE_ID___y1893)); }))) : (io_sqring_entries(ctx))), ({ typeof((typeof(nr))__builtin_choose_expr(((!!(sizeof((typeof(nr) *)1 == (typeof(ctx->sq_entries) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(nr) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(ctx->sq_entries) * 0l)) : (int *)8))))), ((nr) < (ctx->sq_entries) ? (nr) : (ctx->sq_entries)), ({ typeof(nr) __UNIQUE_ID___x1892 = (nr); typeof(ctx->sq_entries) __UNIQUE_ID___y1893 = (ctx->sq_entries); ((__UNIQUE_ID___x1892) < (__UNIQUE_ID___y1893) ? (__UNIQUE_ID___x1892) : (__UNIQUE_ID___y1893)); }))) __UNIQUE_ID___x1894 = ((typeof(nr))__builtin_choose_expr(((!!(sizeof((typeof(nr) *)1 == (typeof(ctx->sq_entries) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(nr) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(ctx->sq_entries) * 0l)) : (int *)8))))), ((nr) < (ctx->sq_entries) ? (nr) : (ctx->sq_entries)), ({ typeof(nr) __UNIQUE_ID___x1892 = (nr); typeof(ctx->sq_entries) __UNIQUE_ID___y1893 = (ctx->sq_entries); ((__UNIQUE_ID___x1892) < (__UNIQUE_ID___y1893) ? (__UNIQUE_ID___x1892) : (__UNIQUE_ID___y1893)); }))); typeof(io_sqring_entries(ctx)) __UNIQUE_ID___y1895 = (io_sqring_entries(ctx)); ((__UNIQUE_ID___x1894) < (__UNIQUE_ID___y1895) ? 
(__UNIQUE_ID___x1894) : (__UNIQUE_ID___y1895)); })); + + if (!percpu_ref_tryget_many(&ctx->refs, nr)) + return -11; + + io_submit_state_start(&state, ctx, nr); + + ctx->ring_fd = ring_fd; + ctx->ring_file = ring_file; + + for (i = 0; i < nr; i++) { + const struct io_uring_sqe *sqe; + struct io_kiocb *req; + int err; + + sqe = io_get_sqe(ctx); + if (__builtin_expect(!!(!sqe), 0)) { + io_consume_sqe(ctx); + break; + } + req = io_alloc_req(ctx, &state); + if (__builtin_expect(!!(!req), 0)) { + if (!submitted) + submitted = -11; + break; + } + + err = io_init_req(ctx, req, sqe, &state); + io_consume_sqe(ctx); + + submitted++; + + if (__builtin_expect(!!(err), 0)) { +fail_req: + io_put_req(req); + io_req_complete(req, err); + break; + } + + trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, + true, io_async_submit(ctx)); + err = io_submit_sqe(req, sqe, &link, &state.comp); + if (err) + goto fail_req; + } + + if (__builtin_expect(!!(submitted != nr), 0)) { + int ref_used = (submitted == -11) ? 0 : submitted; + + percpu_ref_put_many(&ctx->refs, nr - ref_used); + } + if (link) + io_queue_link_head(link, &state.comp); + io_submit_state_end(&state); + + + io_commit_sqring(ctx); + + return submitted; +} + +static int io_sq_thread(void *data) +{ + struct io_ring_ctx *ctx = data; + const struct cred *old_cred; + struct wait_queue_entry wait = { .private = get_current(), .func = autoremove_wake_function, .entry = { &((wait).entry), &((wait).entry) }, }; + unsigned long timeout; + int ret = 0; + + complete(&ctx->sq_thread_comp); + + old_cred = override_creds(ctx->creds); + + timeout = jiffies + ctx->sq_thread_idle; + while (!kthread_should_park()) { + unsigned int to_submit; + + if (!list_empty(&ctx->poll_list)) { + unsigned nr_events = 0; + + mutex_lock_nested(&ctx->uring_lock, 0); + if (!list_empty(&ctx->poll_list) && !need_resched()) + io_do_iopoll(ctx, &nr_events, 0); + else + timeout = jiffies + ctx->sq_thread_idle; + mutex_unlock(&ctx->uring_lock); + } + + to_submit = io_sqring_entries(ctx); + + + + + + if (!to_submit || ret == -16 || need_resched()) { + + + + + + + io_sq_thread_drop_mm(ctx); +# 6354 "fs/io_uring.c" + if (!list_empty(&ctx->poll_list) || need_resched() || + (!(({ unsigned long __dummy; typeof(jiffies) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(timeout) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)((timeout) - (jiffies)) < 0)) && ret != -16 && + !percpu_ref_is_dying(&ctx->refs))) { + io_run_task_work(); + ({ ___might_sleep("fs/io_uring.c", 6358, 0); _cond_resched(); }); + continue; + } + + prepare_to_wait(&ctx->sqo_wait, &wait, + 0x0001); +# 6372 "fs/io_uring.c" + if ((ctx->flags & (1U << 0)) && + !list_empty_careful(&ctx->poll_list)) { + finish_wait(&ctx->sqo_wait, &wait); + continue; + } + + + ctx->rings->sq_flags |= (1U << 0); + + asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc"); + + to_submit = io_sqring_entries(ctx); + if (!to_submit || ret == -16) { + if (kthread_should_park()) { + finish_wait(&ctx->sqo_wait, &wait); + break; + } + if (io_run_task_work()) { + finish_wait(&ctx->sqo_wait, &wait); + continue; + } + if (signal_pending(get_current())) + flush_signals(get_current()); + schedule(); + finish_wait(&ctx->sqo_wait, &wait); + + ctx->rings->sq_flags &= ~(1U << 0); + ret = 0; + continue; + } + finish_wait(&ctx->sqo_wait, &wait); + + ctx->rings->sq_flags &= ~(1U << 0); + } + + mutex_lock_nested(&ctx->uring_lock, 0); + if (__builtin_expect(!!(!percpu_ref_is_dying(&ctx->refs)), 1)) + ret = 
io_submit_sqes(ctx, to_submit, ((void *)0), -1); + mutex_unlock(&ctx->uring_lock); + timeout = jiffies + ctx->sq_thread_idle; + } + + io_run_task_work(); + + io_sq_thread_drop_mm(ctx); + revert_creds(old_cred); + + kthread_parkme(); + + return 0; +} + +struct io_wait_queue { + struct wait_queue_entry wq; + struct io_ring_ctx *ctx; + unsigned to_wait; + unsigned nr_timeouts; +}; + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) bool io_should_wake(struct io_wait_queue *iowq, bool noflush) +{ + struct io_ring_ctx *ctx = iowq->ctx; + + + + + + + return io_cqring_events(ctx, noflush) >= iowq->to_wait || + atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; +} + +static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, + int wake_flags, void *key) +{ + struct io_wait_queue *iowq = ({ void *__mptr = (void *)(curr); do { extern void __compiletime_assert_1896(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(curr)), typeof(((struct io_wait_queue *)0)->wq)) && !__builtin_types_compatible_p(typeof(*(curr)), typeof(void))))) __compiletime_assert_1896(); } while (0); ((struct io_wait_queue *)(__mptr - __builtin_offsetof(struct io_wait_queue, wq))); }) + ; + + + if (!io_should_wake(iowq, true)) + return -1; + + return autoremove_wake_function(curr, mode, wake_flags, key); +} + + + + + +static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + const sigset_t *sig, size_t sigsz) +{ + struct io_wait_queue iowq = { + .wq = { + .private = get_current(), + .func = io_wake_function, + .entry = { &(iowq.wq.entry), &(iowq.wq.entry) }, + }, + .ctx = ctx, + .to_wait = min_events, + }; + struct io_rings *rings = ctx->rings; + int ret = 0; + + do { + if (io_cqring_events(ctx, false) >= min_events) + return 0; + if (!io_run_task_work()) + break; + } while (1); + + if (sig) { + + if (in_compat_syscall()) + ret = set_compat_user_sigmask((const compat_sigset_t *)sig, + sigsz); + else + + ret = set_user_sigmask(sig, sigsz); + + if (ret) + return ret; + } + + iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); + trace_io_uring_cqring_wait(ctx, min_events); + do { + prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, + 0x0001); + + if (io_run_task_work()) + continue; + if (signal_pending(get_current())) { + if (get_current()->jobctl & (1UL << 24)) { + spin_lock_irq(&get_current()->sighand->siglock); + get_current()->jobctl &= ~(1UL << 24); + recalc_sigpending(); + spin_unlock_irq(&get_current()->sighand->siglock); + continue; + } + ret = -4; + break; + } + if (io_should_wake(&iowq, false)) + break; + schedule(); + } while (1); + finish_wait(&ctx->wait, &iowq.wq); + + restore_saved_sigmask_unless(ret == -4); + + return ({ do { extern void __compiletime_assert_1897(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(rings->cq.head) == sizeof(char) || sizeof(rings->cq.head) == sizeof(short) || sizeof(rings->cq.head) == sizeof(int) || sizeof(rings->cq.head) == sizeof(long)) || sizeof(rings->cq.head) == sizeof(long long))) __compiletime_assert_1897(); } while (0); ({ typeof( _Generic((rings->cq.head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long 
long: (signed long long)0, default: (rings->cq.head))) __x = (*(const volatile typeof( _Generic((rings->cq.head), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.head))) *)&(rings->cq.head)); do { } while (0); (typeof(rings->cq.head))__x; }); }) == ({ do { extern void __compiletime_assert_1898(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(rings->cq.tail) == sizeof(char) || sizeof(rings->cq.tail) == sizeof(short) || sizeof(rings->cq.tail) == sizeof(int) || sizeof(rings->cq.tail) == sizeof(long)) || sizeof(rings->cq.tail) == sizeof(long long))) __compiletime_assert_1898(); } while (0); ({ typeof( _Generic((rings->cq.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.tail))) __x = (*(const volatile typeof( _Generic((rings->cq.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (rings->cq.tail))) *)&(rings->cq.tail)); do { } while (0); (typeof(rings->cq.tail))__x; }); }) ? 
ret : 0; +} + +static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) +{ + + if (ctx->ring_sock) { + struct sock *sock = ctx->ring_sock->sk; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&sock->sk_receive_queue)) != ((void *)0)) + kfree_skb(skb); + } +# 6547 "fs/io_uring.c" +} + +static void io_file_ref_kill(struct percpu_ref *ref) +{ + struct fixed_file_data *data; + + data = ({ void *__mptr = (void *)(ref); do { extern void __compiletime_assert_1899(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(ref)), typeof(((struct fixed_file_data *)0)->refs)) && !__builtin_types_compatible_p(typeof(*(ref)), typeof(void))))) __compiletime_assert_1899(); } while (0); ((struct fixed_file_data *)(__mptr - __builtin_offsetof(struct fixed_file_data, refs))); }); + complete(&data->done); +} + +static int io_sqe_files_unregister(struct io_ring_ctx *ctx) +{ + struct fixed_file_data *data = ctx->file_data; + struct fixed_file_ref_node *ref_node = ((void *)0); + unsigned nr_tables, i; + + if (!data) + return -6; + + spin_lock(&data->lock); + if (!list_empty(&data->ref_list)) + ref_node = ({ void *__mptr = (void *)((&data->ref_list)->next); do { extern void __compiletime_assert_1900(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&data->ref_list)->next)), typeof(((struct fixed_file_ref_node *)0)->node)) && !__builtin_types_compatible_p(typeof(*((&data->ref_list)->next)), typeof(void))))) __compiletime_assert_1900(); } while (0); ((struct fixed_file_ref_node *)(__mptr - __builtin_offsetof(struct fixed_file_ref_node, node))); }) + ; + spin_unlock(&data->lock); + if (ref_node) + percpu_ref_kill(&ref_node->refs); + + percpu_ref_kill(&data->refs); + + + flush_delayed_work(&ctx->file_put_work); + wait_for_completion(&data->done); + + __io_sqe_files_unregister(ctx); + nr_tables = (((ctx->nr_user_files) + ((1U << 9)) - 1) / ((1U << 9))); + for (i = 0; i < nr_tables; i++) + kfree(data->table[i].files); + kfree(data->table); + percpu_ref_exit(&data->refs); + kfree(data); + ctx->file_data = ((void *)0); + ctx->nr_user_files = 0; + return 0; +} + +static void io_sq_thread_stop(struct io_ring_ctx *ctx) +{ + if (ctx->sqo_thread) { + wait_for_completion(&ctx->sq_thread_comp); + + + + + + kthread_park(ctx->sqo_thread); + kthread_stop(ctx->sqo_thread); + ctx->sqo_thread = ((void *)0); + } +} + +static void io_finish_async(struct io_ring_ctx *ctx) +{ + io_sq_thread_stop(ctx); + + if (ctx->io_wq) { + io_wq_destroy(ctx->io_wq); + ctx->io_wq = ((void *)0); + } +} + + + + + + + +static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) +{ + struct sock *sk = ctx->ring_sock->sk; + struct scm_fp_list *fpl; + struct sk_buff *skb; + int i, nr_files; + + fpl = kzalloc(sizeof(*fpl), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!fpl) + return -12; + + skb = alloc_skb(0, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!skb) { + kfree(fpl); + return -12; + } + + skb->sk = sk; + + nr_files = 0; + fpl->user = get_uid(ctx->user); + for (i = 0; i < nr; i++) { + struct file *file = io_file_from_index(ctx, i + offset); + + if (!file) + continue; + fpl->fp[nr_files] = get_file(file); + unix_inflight(fpl->user, fpl->fp[nr_files]); + nr_files++; + } + + if (nr_files) { + fpl->max = 253; + fpl->count = nr_files; + (*(struct unix_skb_parms *)&((skb)->cb)).fp = fpl; + skb->destructor = unix_destruct_scm; + 
refcount_add(skb->truesize, &sk->sk_wmem_alloc); + skb_queue_head(&sk->sk_receive_queue, skb); + + for (i = 0; i < nr_files; i++) + fput(fpl->fp[i]); + } else { + kfree_skb(skb); + kfree(fpl); + } + + return 0; +} + + + + + + +static int io_sqe_files_scm(struct io_ring_ctx *ctx) +{ + unsigned left, total; + int ret = 0; + + total = 0; + left = ctx->nr_user_files; + while (left) { + unsigned this_files = __builtin_choose_expr(((!!(sizeof((typeof((unsigned)(left)) *)1 == (typeof((unsigned)(253)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned)(left)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((unsigned)(253)) * 0l)) : (int *)8))))), (((unsigned)(left)) < ((unsigned)(253)) ? ((unsigned)(left)) : ((unsigned)(253))), ({ typeof((unsigned)(left)) __UNIQUE_ID___x1901 = ((unsigned)(left)); typeof((unsigned)(253)) __UNIQUE_ID___y1902 = ((unsigned)(253)); ((__UNIQUE_ID___x1901) < (__UNIQUE_ID___y1902) ? (__UNIQUE_ID___x1901) : (__UNIQUE_ID___y1902)); })); + + ret = __io_sqe_files_scm(ctx, this_files, total); + if (ret) + break; + left -= this_files; + total += this_files; + } + + if (!ret) + return 0; + + while (total < ctx->nr_user_files) { + struct file *file = io_file_from_index(ctx, total); + + if (file) + fput(file); + total++; + } + + return ret; +} + + + + + + + +static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables, + unsigned nr_files) +{ + int i; + + for (i = 0; i < nr_tables; i++) { + struct fixed_file_table *table = &ctx->file_data->table[i]; + unsigned this_files; + + this_files = __builtin_choose_expr(((!!(sizeof((typeof(nr_files) *)1 == (typeof((1U << 9)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(nr_files) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((1U << 9)) * 0l)) : (int *)8))))), ((nr_files) < ((1U << 9)) ? (nr_files) : ((1U << 9))), ({ typeof(nr_files) __UNIQUE_ID___x1903 = (nr_files); typeof((1U << 9)) __UNIQUE_ID___y1904 = ((1U << 9)); ((__UNIQUE_ID___x1903) < (__UNIQUE_ID___y1904) ? 
(__UNIQUE_ID___x1903) : (__UNIQUE_ID___y1904)); })); + table->files = kcalloc(this_files, sizeof(struct file *), + ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!table->files) + break; + nr_files -= this_files; + } + + if (i == nr_tables) + return 0; + + for (i = 0; i < nr_tables; i++) { + struct fixed_file_table *table = &ctx->file_data->table[i]; + kfree(table->files); + } + return 1; +} + +static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file) +{ + + struct sock *sock = ctx->ring_sock->sk; + struct sk_buff_head list, *head = &sock->sk_receive_queue; + struct sk_buff *skb; + int i; + + __skb_queue_head_init(&list); + + + + + + skb = skb_dequeue(head); + while (skb) { + struct scm_fp_list *fp; + + fp = (*(struct unix_skb_parms *)&((skb)->cb)).fp; + for (i = 0; i < fp->count; i++) { + int left; + + if (fp->fp[i] != file) + continue; + + unix_notinflight(fp->user, fp->fp[i]); + left = fp->count - 1 - i; + if (left) { + memmove(&fp->fp[i], &fp->fp[i + 1], + left * sizeof(struct file *)); + } + fp->count--; + if (!fp->count) { + kfree_skb(skb); + skb = ((void *)0); + } else { + __skb_queue_tail(&list, skb); + } + fput(file); + file = ((void *)0); + break; + } + + if (!file) + break; + + __skb_queue_tail(&list, skb); + + skb = skb_dequeue(head); + } + + if (skb_peek(&list)) { + spin_lock_irq(&head->lock); + while ((skb = __skb_dequeue(&list)) != ((void *)0)) + __skb_queue_tail(head, skb); + spin_unlock_irq(&head->lock); + } + + + +} + +struct io_file_put { + struct list_head list; + struct file *file; +}; + +static void __io_file_put_work(struct fixed_file_ref_node *ref_node) +{ + struct fixed_file_data *file_data = ref_node->file_data; + struct io_ring_ctx *ctx = file_data->ctx; + struct io_file_put *pfile, *tmp; + + for (pfile = ({ void *__mptr = (void *)((&ref_node->file_list)->next); do { extern void __compiletime_assert_1905(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ref_node->file_list)->next)), typeof(((typeof(*pfile) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((&ref_node->file_list)->next)), typeof(void))))) __compiletime_assert_1905(); } while (0); ((typeof(*pfile) *)(__mptr - __builtin_offsetof(typeof(*pfile), list))); }), tmp = ({ void *__mptr = (void *)((pfile)->list.next); do { extern void __compiletime_assert_1906(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((pfile)->list.next)), typeof(((typeof(*(pfile)) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((pfile)->list.next)), typeof(void))))) __compiletime_assert_1906(); } while (0); ((typeof(*(pfile)) *)(__mptr - __builtin_offsetof(typeof(*(pfile)), list))); }); &pfile->list != (&ref_node->file_list); pfile = tmp, tmp = ({ void *__mptr = (void *)((tmp)->list.next); do { extern void __compiletime_assert_1907(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((tmp)->list.next)), typeof(((typeof(*(tmp)) *)0)->list)) && !__builtin_types_compatible_p(typeof(*((tmp)->list.next)), typeof(void))))) __compiletime_assert_1907(); } while (0); ((typeof(*(tmp)) *)(__mptr - __builtin_offsetof(typeof(*(tmp)), list))); })) { + list_del(&pfile->list); + io_ring_file_put(ctx, pfile->file); + kfree(pfile); + } + + spin_lock(&file_data->lock); + list_del(&ref_node->node); + spin_unlock(&file_data->lock); + + percpu_ref_exit(&ref_node->refs); + 
kfree(ref_node); + percpu_ref_put(&file_data->refs); +} + +static void io_file_put_work(struct work_struct *work) +{ + struct io_ring_ctx *ctx; + struct llist_node *node; + + ctx = ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_1908(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct io_ring_ctx *)0)->file_put_work.work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_1908(); } while (0); ((struct io_ring_ctx *)(__mptr - __builtin_offsetof(struct io_ring_ctx, file_put_work.work))); }); + node = llist_del_all(&ctx->file_put_llist); + + while (node) { + struct fixed_file_ref_node *ref_node; + struct llist_node *next = node->next; + + ref_node = ({ void *__mptr = (void *)(node); do { extern void __compiletime_assert_1909(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(node)), typeof(((struct fixed_file_ref_node *)0)->llist)) && !__builtin_types_compatible_p(typeof(*(node)), typeof(void))))) __compiletime_assert_1909(); } while (0); ((struct fixed_file_ref_node *)(__mptr - __builtin_offsetof(struct fixed_file_ref_node, llist))); }); + __io_file_put_work(ref_node); + node = next; + } +} + +static void io_file_data_ref_zero(struct percpu_ref *ref) +{ + struct fixed_file_ref_node *ref_node; + struct io_ring_ctx *ctx; + bool first_add; + int delay = 250; + + ref_node = ({ void *__mptr = (void *)(ref); do { extern void __compiletime_assert_1910(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(ref)), typeof(((struct fixed_file_ref_node *)0)->refs)) && !__builtin_types_compatible_p(typeof(*(ref)), typeof(void))))) __compiletime_assert_1910(); } while (0); ((struct fixed_file_ref_node *)(__mptr - __builtin_offsetof(struct fixed_file_ref_node, refs))); }); + ctx = ref_node->file_data->ctx; + + if (percpu_ref_is_dying(&ctx->file_data->refs)) + delay = 0; + + first_add = llist_add(&ref_node->llist, &ctx->file_put_llist); + if (!delay) + mod_delayed_work(system_wq, &ctx->file_put_work, 0); + else if (first_add) + queue_delayed_work(system_wq, &ctx->file_put_work, delay); +} + +static struct fixed_file_ref_node *alloc_fixed_file_ref_node( + struct io_ring_ctx *ctx) +{ + struct fixed_file_ref_node *ref_node; + + ref_node = kzalloc(sizeof(*ref_node), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!ref_node) + return ERR_PTR(-12); + + if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero, + 0, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)))) { + kfree(ref_node); + return ERR_PTR(-12); + } + INIT_LIST_HEAD(&ref_node->node); + INIT_LIST_HEAD(&ref_node->file_list); + ref_node->file_data = ctx->file_data; + return ref_node; +} + +static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node) +{ + percpu_ref_exit(&ref_node->refs); + kfree(ref_node); +} + +static int io_sqe_files_register(struct io_ring_ctx *ctx, void *arg, + unsigned nr_args) +{ + __s32 *fds = (__s32 *) arg; + unsigned nr_tables; + struct file *file; + int fd, ret = 0; + unsigned i; + struct fixed_file_ref_node *ref_node; + + if (ctx->file_data) + return -16; + if (!nr_args) + return -22; + if (nr_args > (64 * (1U << 9))) + return -24; + + ctx->file_data = kzalloc(sizeof(*ctx->file_data), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!ctx->file_data) + 
return -12; + ctx->file_data->ctx = ctx; + __init_completion(&ctx->file_data->done); + INIT_LIST_HEAD(&ctx->file_data->ref_list); + do { static struct lock_class_key __key; __raw_spin_lock_init(spinlock_check(&ctx->file_data->lock), "&ctx->file_data->lock", &__key, LD_WAIT_CONFIG); } while (0); + + nr_tables = (((nr_args) + ((1U << 9)) - 1) / ((1U << 9))); + ctx->file_data->table = kcalloc(nr_tables, + sizeof(struct fixed_file_table), + ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!ctx->file_data->table) { + kfree(ctx->file_data); + ctx->file_data = ((void *)0); + return -12; + } + + if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill, + PERCPU_REF_ALLOW_REINIT, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u)))) { + kfree(ctx->file_data->table); + kfree(ctx->file_data); + ctx->file_data = ((void *)0); + return -12; + } + + if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) { + percpu_ref_exit(&ctx->file_data->refs); + kfree(ctx->file_data->table); + kfree(ctx->file_data); + ctx->file_data = ((void *)0); + return -12; + } + + for (i = 0; i < nr_args; i++, ctx->nr_user_files++) { + struct fixed_file_table *table; + unsigned index; + + ret = -14; + if (copy_from_user(&fd, &fds[i], sizeof(fd))) + break; + + if (fd == -1) { + ret = 0; + continue; + } + + table = &ctx->file_data->table[i >> 9]; + index = i & ((1U << 9) - 1); + file = fget(fd); + + ret = -9; + if (!file) + break; +# 6972 "fs/io_uring.c" + if (file->f_op == &io_uring_fops) { + fput(file); + break; + } + ret = 0; + table->files[index] = file; + } + + if (ret) { + for (i = 0; i < ctx->nr_user_files; i++) { + file = io_file_from_index(ctx, i); + if (file) + fput(file); + } + for (i = 0; i < nr_tables; i++) + kfree(ctx->file_data->table[i].files); + + kfree(ctx->file_data->table); + kfree(ctx->file_data); + ctx->file_data = ((void *)0); + ctx->nr_user_files = 0; + return ret; + } + + ret = io_sqe_files_scm(ctx); + if (ret) { + io_sqe_files_unregister(ctx); + return ret; + } + + ref_node = alloc_fixed_file_ref_node(ctx); + if (IS_ERR(ref_node)) { + io_sqe_files_unregister(ctx); + return PTR_ERR(ref_node); + } + + ctx->file_data->cur_refs = &ref_node->refs; + spin_lock(&ctx->file_data->lock); + list_add(&ref_node->node, &ctx->file_data->ref_list); + spin_unlock(&ctx->file_data->lock); + percpu_ref_get(&ctx->file_data->refs); + return ret; +} + +static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file, + int index) +{ + + struct sock *sock = ctx->ring_sock->sk; + struct sk_buff_head *head = &sock->sk_receive_queue; + struct sk_buff *skb; + + + + + + + spin_lock_irq(&head->lock); + skb = skb_peek(head); + if (skb) { + struct scm_fp_list *fpl = (*(struct unix_skb_parms *)&((skb)->cb)).fp; + + if (fpl->count < 253) { + __skb_unlink(skb, head); + spin_unlock_irq(&head->lock); + fpl->fp[fpl->count] = get_file(file); + unix_inflight(fpl->user, fpl->fp[fpl->count]); + fpl->count++; + spin_lock_irq(&head->lock); + __skb_queue_head(head, skb); + } else { + skb = ((void *)0); + } + } + spin_unlock_irq(&head->lock); + + if (skb) { + fput(file); + return 0; + } + + return __io_sqe_files_scm(ctx, 1, index); + + + +} + +static int io_queue_file_removal(struct fixed_file_data *data, + struct file *file) +{ + struct io_file_put *pfile; + struct percpu_ref *refs = data->cur_refs; + struct fixed_file_ref_node *ref_node; + + pfile = kzalloc(sizeof(*pfile), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!pfile) + return -12; + + ref_node = ({ void *__mptr = 
(void *)(refs); do { extern void __compiletime_assert_1911(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(refs)), typeof(((struct fixed_file_ref_node *)0)->refs)) && !__builtin_types_compatible_p(typeof(*(refs)), typeof(void))))) __compiletime_assert_1911(); } while (0); ((struct fixed_file_ref_node *)(__mptr - __builtin_offsetof(struct fixed_file_ref_node, refs))); }); + pfile->file = file; + list_add(&pfile->list, &ref_node->file_list); + + return 0; +} + +static int __io_sqe_files_update(struct io_ring_ctx *ctx, + struct io_uring_files_update *up, + unsigned nr_args) +{ + struct fixed_file_data *data = ctx->file_data; + struct fixed_file_ref_node *ref_node; + struct file *file; + __s32 *fds; + int fd, i, err; + __u32 done; + bool needs_switch = false; + + if (({ typeof(up->offset) __a = (up->offset); typeof(nr_args) __b = (nr_args); typeof(&done) __d = (&done); (void) (&__a == &__b); (void) (&__a == __d); __builtin_add_overflow(__a, __b, __d); })) + return -75; + if (done > ctx->nr_user_files) + return -22; + + ref_node = alloc_fixed_file_ref_node(ctx); + if (IS_ERR(ref_node)) + return PTR_ERR(ref_node); + + done = 0; + fds = ( { ({ u64 __dummy; typeof((up->fds)) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)(up->fds); } ); + while (nr_args) { + struct fixed_file_table *table; + unsigned index; + + err = 0; + if (copy_from_user(&fd, &fds[done], sizeof(fd))) { + err = -14; + break; + } + i = ({ typeof(up->offset) _i = (up->offset); typeof(ctx->nr_user_files) _s = (ctx->nr_user_files); unsigned long _mask = array_index_mask_nospec(_i, _s); do { extern void __compiletime_assert_1912(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(_i) > sizeof(long)"))); if (!(!(sizeof(_i) > sizeof(long)))) __compiletime_assert_1912(); } while (0); do { extern void __compiletime_assert_1913(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(_s) > sizeof(long)"))); if (!(!(sizeof(_s) > sizeof(long)))) __compiletime_assert_1913(); } while (0); (typeof(_i)) (_i & _mask); }); + table = &ctx->file_data->table[i >> 9]; + index = i & ((1U << 9) - 1); + if (table->files[index]) { + file = io_file_from_index(ctx, index); + err = io_queue_file_removal(data, file); + if (err) + break; + table->files[index] = ((void *)0); + needs_switch = true; + } + if (fd != -1) { + file = fget(fd); + if (!file) { + err = -9; + break; + } +# 7134 "fs/io_uring.c" + if (file->f_op == &io_uring_fops) { + fput(file); + err = -9; + break; + } + table->files[index] = file; + err = io_sqe_file_register(ctx, file, i); + if (err) + break; + } + nr_args--; + done++; + up->offset++; + } + + if (needs_switch) { + percpu_ref_kill(data->cur_refs); + spin_lock(&data->lock); + list_add(&ref_node->node, &data->ref_list); + data->cur_refs = &ref_node->refs; + spin_unlock(&data->lock); + percpu_ref_get(&ctx->file_data->refs); + } else + destroy_fixed_file_ref_node(ref_node); + + return done ? 
done : err; +} + +static int io_sqe_files_update(struct io_ring_ctx *ctx, void *arg, + unsigned nr_args) +{ + struct io_uring_files_update up; + + if (!ctx->file_data) + return -6; + if (!nr_args) + return -22; + if (copy_from_user(&up, arg, sizeof(up))) + return -14; + if (up.resv) + return -22; + + return __io_sqe_files_update(ctx, &up, nr_args); +} + +static void io_free_work(struct io_wq_work *work) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_1914(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct io_kiocb *)0)->work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_1914(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, work))); }); + + + io_put_req(req); +} + +static int io_init_wq_offload(struct io_ring_ctx *ctx, + struct io_uring_params *p) +{ + struct io_wq_data data; + struct fd f; + struct io_ring_ctx *ctx_attach; + unsigned int concurrency; + int ret = 0; + + data.user = ctx->user; + data.free_work = io_free_work; + data.do_work = io_wq_submit_work; + + if (!(p->flags & (1U << 5))) { + + concurrency = __builtin_choose_expr(((!!(sizeof((typeof(ctx->sq_entries) *)1 == (typeof(4 * num_online_cpus()) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(ctx->sq_entries) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(4 * num_online_cpus()) * 0l)) : (int *)8))))), ((ctx->sq_entries) < (4 * num_online_cpus()) ? (ctx->sq_entries) : (4 * num_online_cpus())), ({ typeof(ctx->sq_entries) __UNIQUE_ID___x1915 = (ctx->sq_entries); typeof(4 * num_online_cpus()) __UNIQUE_ID___y1916 = (4 * num_online_cpus()); ((__UNIQUE_ID___x1915) < (__UNIQUE_ID___y1916) ? 
(__UNIQUE_ID___x1915) : (__UNIQUE_ID___y1916)); })); + + ctx->io_wq = io_wq_create(concurrency, &data); + if (IS_ERR(ctx->io_wq)) { + ret = PTR_ERR(ctx->io_wq); + ctx->io_wq = ((void *)0); + } + return ret; + } + + f = fdget(p->wq_fd); + if (!f.file) + return -9; + + if (f.file->f_op != &io_uring_fops) { + ret = -22; + goto out_fput; + } + + ctx_attach = f.file->private_data; + + if (!io_wq_get(ctx_attach->io_wq, &data)) { + ret = -22; + goto out_fput; + } + + ctx->io_wq = ctx_attach->io_wq; +out_fput: + fdput(f); + return ret; +} + +static int io_sq_offload_start(struct io_ring_ctx *ctx, + struct io_uring_params *p) +{ + int ret; + + if (ctx->flags & (1U << 1)) { + mmgrab(get_current()->mm); + ctx->sqo_mm = get_current()->mm; + + ret = -1; + if (!capable(21)) + goto err; + + ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); + if (!ctx->sq_thread_idle) + ctx->sq_thread_idle = 250; + + if (p->flags & (1U << 2)) { + int cpu = p->sq_thread_cpu; + + ret = -22; + if (cpu >= nr_cpu_ids) + goto err; + if (!cpumask_test_cpu((cpu), ((const struct cpumask *)&__cpu_online_mask))) + goto err; + + ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread, + ctx, cpu, + "io_uring-sq"); + } else { + ctx->sqo_thread = kthread_create_on_node(io_sq_thread, ctx, (-1), "io_uring-sq") + ; + } + if (IS_ERR(ctx->sqo_thread)) { + ret = PTR_ERR(ctx->sqo_thread); + ctx->sqo_thread = ((void *)0); + goto err; + } + wake_up_process(ctx->sqo_thread); + } else if (p->flags & (1U << 2)) { + + ret = -22; + goto err; + } + + ret = io_init_wq_offload(ctx, p); + if (ret) + goto err; + + return 0; +err: + io_finish_async(ctx); + if (ctx->sqo_mm) { + mmdrop(ctx->sqo_mm); + ctx->sqo_mm = ((void *)0); + } + return ret; +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) void __io_unaccount_mem(struct user_struct *user, + unsigned long nr_pages) +{ + atomic_long_sub(nr_pages, &user->locked_vm); +} + +static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) int __io_account_mem(struct user_struct *user, + unsigned long nr_pages) +{ + unsigned long page_limit, cur_pages, new_pages; + + + page_limit = rlimit(8) >> 12; + + do { + cur_pages = atomic_long_read(&user->locked_vm); + new_pages = cur_pages + nr_pages; + if (new_pages > page_limit) + return -12; + } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages, + new_pages) != cur_pages); + + return 0; +} + +static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, + enum io_mem_account acct) +{ + if (ctx->limit_mem) + __io_unaccount_mem(ctx->user, nr_pages); + + if (ctx->sqo_mm) { + if (acct == ACCT_LOCKED) + ctx->sqo_mm->locked_vm -= nr_pages; + else if (acct == ACCT_PINNED) + atomic64_sub(nr_pages, &ctx->sqo_mm->pinned_vm); + } +} + +static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, + enum io_mem_account acct) +{ + int ret; + + if (ctx->limit_mem) { + ret = __io_account_mem(ctx->user, nr_pages); + if (ret) + return ret; + } + + if (ctx->sqo_mm) { + if (acct == ACCT_LOCKED) + ctx->sqo_mm->locked_vm += nr_pages; + else if (acct == ACCT_PINNED) + atomic64_add(nr_pages, &ctx->sqo_mm->pinned_vm); + } + + return 0; +} + +static void io_mem_free(void *ptr) +{ + struct page *page; + + if (!ptr) + return; + + page = virt_to_head_page(ptr); + if (put_page_testzero(page)) + free_compound_page(page); +} + +static void *io_mem_alloc(size_t size) +{ + gfp_t gfp_flags = ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( 
gfp_t)0x80u)) | (( gfp_t)0x100u) | (( gfp_t)0x2000u) | (( gfp_t)0x40000u) | + (( gfp_t)0x10000u); + + return (void *) __get_free_pages(gfp_flags, get_order(size)); +} + +static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, + size_t *sq_offset) +{ + struct io_rings *rings; + size_t off, sq_array_size; + + off = __ab_c_size(cq_entries, sizeof(*(rings)->cqes) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof(((rings)->cqes)), typeof(&((rings)->cqes)[0])))); }))), sizeof(*(rings))); + if (off == (~(size_t)0)) + return (~(size_t)0); + + + off = ((((off)) + ((typeof((off)))(((1 << (6)))) - 1)) & ~((typeof((off)))(((1 << (6)))) - 1)); + if (off == 0) + return (~(size_t)0); + + + sq_array_size = array_size(sizeof(u32), sq_entries); + if (sq_array_size == (~(size_t)0)) + return (~(size_t)0); + + if (({ typeof(off) __a = (off); typeof(sq_array_size) __b = (sq_array_size); typeof(&off) __d = (&off); (void) (&__a == &__b); (void) (&__a == __d); __builtin_add_overflow(__a, __b, __d); })) + return (~(size_t)0); + + if (sq_offset) + *sq_offset = off; + + return off; +} + +static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries) +{ + size_t pages; + + pages = (size_t)1 << get_order( + rings_size(sq_entries, cq_entries, ((void *)0))); + pages += (size_t)1 << get_order( + array_size(sizeof(struct io_uring_sqe), sq_entries)); + + return pages; +} + +static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx) +{ + int i, j; + + if (!ctx->user_bufs) + return -6; + + for (i = 0; i < ctx->nr_user_bufs; i++) { + struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; + + for (j = 0; j < imu->nr_bvecs; j++) + unpin_user_page(imu->bvec[j].bv_page); + + io_unaccount_mem(ctx, imu->nr_bvecs, ACCT_PINNED); + kvfree(imu->bvec); + imu->nr_bvecs = 0; + } + + kfree(ctx->user_bufs); + ctx->user_bufs = ((void *)0); + ctx->nr_user_bufs = 0; + return 0; +} + +static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst, + void *arg, unsigned index) +{ + struct iovec *src; + + + if (ctx->compat) { + struct compat_iovec *ciovs; + struct compat_iovec ciov; + + ciovs = (struct compat_iovec *) arg; + if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov))) + return -14; + + dst->iov_base = ( { ({ u64 __dummy; typeof(((u64)ciov.iov_base)) __dummy2; (void)(&__dummy == &__dummy2); 1; }); (void *)(uintptr_t)((u64)ciov.iov_base); } ); + dst->iov_len = ciov.iov_len; + return 0; + } + + src = (struct iovec *) arg; + if (copy_from_user(dst, &src[index], sizeof(*dst))) + return -14; + return 0; +} + +static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void *arg, + unsigned nr_args) +{ + struct vm_area_struct **vmas = ((void *)0); + struct page **pages = ((void *)0); + int i, j, got_pages = 0; + int ret = -22; + + if (ctx->user_bufs) + return -16; + if (!nr_args || nr_args > 1024) + return -22; + + ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf), + ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!ctx->user_bufs) + return -12; + + for (i = 0; i < nr_args; i++) { + struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; + unsigned long off, start, end, ubuf; + int pret, nr_pages; + struct iovec iov; + size_t size; + + ret = io_copy_iov(ctx, &iov, arg, i); + if (ret) + goto err; + + + + + + + ret = -14; + if (!iov.iov_base || !iov.iov_len) + goto err; + + + if (iov.iov_len > 0x40000000) + goto err; + + ubuf = (unsigned long) iov.iov_base; + end = (ubuf + iov.iov_len + ((1UL) << 12) - 1) >> 12; + start = ubuf >> 12; + nr_pages = end - start; + + ret = 
io_account_mem(ctx, nr_pages, ACCT_PINNED); + if (ret) + goto err; + + ret = 0; + if (!pages || nr_pages > got_pages) { + kvfree(vmas); + kvfree(pages); + pages = kvmalloc_array(nr_pages, sizeof(struct page *), + ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + vmas = kvmalloc_array(nr_pages, + sizeof(struct vm_area_struct *), + ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!pages || !vmas) { + ret = -12; + io_unaccount_mem(ctx, nr_pages, ACCT_PINNED); + goto err; + } + got_pages = nr_pages; + } + + imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec), + ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + ret = -12; + if (!imu->bvec) { + io_unaccount_mem(ctx, nr_pages, ACCT_PINNED); + goto err; + } + + ret = 0; + mmap_read_lock(get_current()->mm); + pret = pin_user_pages(ubuf, nr_pages, + 0x01 | 0x10000, + pages, vmas); + if (pret == nr_pages) { + + for (j = 0; j < nr_pages; j++) { + struct vm_area_struct *vma = vmas[j]; + + if (vma->vm_file && + !is_file_hugepages(vma->vm_file)) { + ret = -95; + break; + } + } + } else { + ret = pret < 0 ? pret : -14; + } + mmap_read_unlock(get_current()->mm); + if (ret) { + + + + + if (pret > 0) + unpin_user_pages(pages, pret); + io_unaccount_mem(ctx, nr_pages, ACCT_PINNED); + kvfree(imu->bvec); + goto err; + } + + off = ubuf & ~(~(((1UL) << 12)-1)); + size = iov.iov_len; + for (j = 0; j < nr_pages; j++) { + size_t vec_len; + + vec_len = __builtin_choose_expr(((!!(sizeof((typeof((size_t)(size)) *)1 == (typeof((size_t)(((1UL) << 12) - off)) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)((size_t)(size)) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)((size_t)(((1UL) << 12) - off)) * 0l)) : (int *)8))))), (((size_t)(size)) < ((size_t)(((1UL) << 12) - off)) ? ((size_t)(size)) : ((size_t)(((1UL) << 12) - off))), ({ typeof((size_t)(size)) __UNIQUE_ID___x1917 = ((size_t)(size)); typeof((size_t)(((1UL) << 12) - off)) __UNIQUE_ID___y1918 = ((size_t)(((1UL) << 12) - off)); ((__UNIQUE_ID___x1917) < (__UNIQUE_ID___y1918) ? 
(__UNIQUE_ID___x1917) : (__UNIQUE_ID___y1918)); })); + imu->bvec[j].bv_page = pages[j]; + imu->bvec[j].bv_len = vec_len; + imu->bvec[j].bv_offset = off; + off = 0; + size -= vec_len; + } + + imu->ubuf = ubuf; + imu->len = iov.iov_len; + imu->nr_bvecs = nr_pages; + + ctx->nr_user_bufs++; + } + kvfree(pages); + kvfree(vmas); + return 0; +err: + kvfree(pages); + kvfree(vmas); + io_sqe_buffer_unregister(ctx); + return ret; +} + +static int io_eventfd_register(struct io_ring_ctx *ctx, void *arg) +{ + __s32 *fds = arg; + int fd; + + if (ctx->cq_ev_fd) + return -16; + + if (copy_from_user(&fd, fds, sizeof(*fds))) + return -14; + + ctx->cq_ev_fd = eventfd_ctx_fdget(fd); + if (IS_ERR(ctx->cq_ev_fd)) { + int ret = PTR_ERR(ctx->cq_ev_fd); + ctx->cq_ev_fd = ((void *)0); + return ret; + } + + return 0; +} + +static int io_eventfd_unregister(struct io_ring_ctx *ctx) +{ + if (ctx->cq_ev_fd) { + eventfd_ctx_put(ctx->cq_ev_fd); + ctx->cq_ev_fd = ((void *)0); + return 0; + } + + return -6; +} + +static int __io_destroy_buffers(int id, void *p, void *data) +{ + struct io_ring_ctx *ctx = data; + struct io_buffer *buf = p; + + __io_remove_buffers(ctx, buf, id, -1U); + return 0; +} + +static void io_destroy_buffers(struct io_ring_ctx *ctx) +{ + idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx); + idr_destroy(&ctx->io_buffer_idr); +} + +static void io_ring_ctx_free(struct io_ring_ctx *ctx) +{ + io_finish_async(ctx); + if (ctx->sqo_mm) { + mmdrop(ctx->sqo_mm); + ctx->sqo_mm = ((void *)0); + } + + io_sqe_buffer_unregister(ctx); + io_sqe_files_unregister(ctx); + io_eventfd_unregister(ctx); + io_destroy_buffers(ctx); + idr_destroy(&ctx->personality_idr); + + + if (ctx->ring_sock) { + ctx->ring_sock->file = ((void *)0); + sock_release(ctx->ring_sock); + } + + + io_mem_free(ctx->rings); + io_mem_free(ctx->sq_sqes); + + percpu_ref_exit(&ctx->refs); + io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries), + ACCT_LOCKED); + free_uid(ctx->user); + put_cred(ctx->creds); + kfree(ctx->cancel_hash); + kmem_cache_free(req_cachep, ctx->fallback_req); + kfree(ctx); +} + +static __poll_t io_uring_poll(struct file *file, poll_table *wait) +{ + struct io_ring_ctx *ctx = file->private_data; + __poll_t mask = 0; + + poll_wait(file, &ctx->cq_wait, wait); + + + + + __asm__ __volatile__("": : :"memory"); + if (({ do { extern void __compiletime_assert_1919(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ctx->rings->sq.tail) == sizeof(char) || sizeof(ctx->rings->sq.tail) == sizeof(short) || sizeof(ctx->rings->sq.tail) == sizeof(int) || sizeof(ctx->rings->sq.tail) == sizeof(long)) || sizeof(ctx->rings->sq.tail) == sizeof(long long))) __compiletime_assert_1919(); } while (0); ({ typeof( _Generic((ctx->rings->sq.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long long: (signed long long)0, default: (ctx->rings->sq.tail))) __x = (*(const volatile typeof( _Generic((ctx->rings->sq.tail), char: (char)0, unsigned char: (unsigned char)0, signed char: (signed char)0, unsigned short: (unsigned short)0, signed short: (signed short)0, unsigned int: (unsigned int)0, signed int: (signed int)0, unsigned long: (unsigned long)0, signed long: (signed long)0, unsigned long long: (unsigned long long)0, signed long 
long: (signed long long)0, default: (ctx->rings->sq.tail))) *)&(ctx->rings->sq.tail)); do { } while (0); (typeof(ctx->rings->sq.tail))__x; }); }) - ctx->cached_sq_head != + ctx->rings->sq_ring_entries) + mask |= ( __poll_t)0x00000004 | ( __poll_t)0x00000100; + if (io_cqring_events(ctx, false)) + mask |= ( __poll_t)0x00000001 | ( __poll_t)0x00000040; + + return mask; +} + +static int io_uring_fasync(int fd, struct file *file, int on) +{ + struct io_ring_ctx *ctx = file->private_data; + + return fasync_helper(fd, file, on, &ctx->cq_fasync); +} + +static int io_remove_personalities(int id, void *p, void *data) +{ + struct io_ring_ctx *ctx = data; + const struct cred *cred; + + cred = idr_remove(&ctx->personality_idr, id); + if (cred) + put_cred(cred); + return 0; +} + +static void io_ring_exit_work(struct work_struct *work) +{ + struct io_ring_ctx *ctx = ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_1920(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct io_ring_ctx *)0)->exit_work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_1920(); } while (0); ((struct io_ring_ctx *)(__mptr - __builtin_offsetof(struct io_ring_ctx, exit_work))); }) + ; + + + + + + + + do { + if (ctx->rings) + io_cqring_overflow_flush(ctx, true); + io_iopoll_try_reap_events(ctx); + } while (!wait_for_completion_timeout(&ctx->ref_comp, 250/20)); + io_ring_ctx_free(ctx); +} + +static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) +{ + mutex_lock_nested(&ctx->uring_lock, 0); + percpu_ref_kill(&ctx->refs); + mutex_unlock(&ctx->uring_lock); + + io_kill_timeouts(ctx); + io_poll_remove_all(ctx); + + if (ctx->io_wq) + io_wq_cancel_all(ctx->io_wq); + + + if (ctx->rings) + io_cqring_overflow_flush(ctx, true); + io_iopoll_try_reap_events(ctx); + idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx); + do { static struct lock_class_key __key; __init_work(((&ctx->exit_work)), 0); ((&ctx->exit_work))->data = (atomic_long_t) { ((unsigned long)WORK_STRUCT_NO_POOL) }; lockdep_init_map(&((&ctx->exit_work))->lockdep_map, "(work_completion)""(&ctx->exit_work)", &__key, 0); INIT_LIST_HEAD(&((&ctx->exit_work))->entry); ((&ctx->exit_work))->func = ((io_ring_exit_work)); } while (0); + queue_work(system_wq, &ctx->exit_work); +} + +static int io_uring_release(struct inode *inode, struct file *file) +{ + struct io_ring_ctx *ctx = file->private_data; + + file->private_data = ((void *)0); + io_ring_ctx_wait_and_kill(ctx); + return 0; +} + +static bool io_wq_files_match(struct io_wq_work *work, void *data) +{ + struct files_struct *files = data; + + return work->files == files; +} + +static void io_uring_cancel_files(struct io_ring_ctx *ctx, + struct files_struct *files) +{ + if (list_empty_careful(&ctx->inflight_list)) + return; + + + io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true); + + while (!list_empty_careful(&ctx->inflight_list)) { + struct io_kiocb *cancel_req = ((void *)0), *req; + struct wait_queue_entry wait = { .private = get_current(), .func = autoremove_wake_function, .entry = { &((wait).entry), &((wait).entry) }, }; + + spin_lock_irq(&ctx->inflight_lock); + for (req = ({ void *__mptr = (void *)((&ctx->inflight_list)->next); do { extern void __compiletime_assert_1921(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((&ctx->inflight_list)->next)), typeof(((typeof(*req) 
*)0)->inflight_entry)) && !__builtin_types_compatible_p(typeof(*((&ctx->inflight_list)->next)), typeof(void))))) __compiletime_assert_1921(); } while (0); ((typeof(*req) *)(__mptr - __builtin_offsetof(typeof(*req), inflight_entry))); }); &req->inflight_entry != (&ctx->inflight_list); req = ({ void *__mptr = (void *)((req)->inflight_entry.next); do { extern void __compiletime_assert_1922(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*((req)->inflight_entry.next)), typeof(((typeof(*(req)) *)0)->inflight_entry)) && !__builtin_types_compatible_p(typeof(*((req)->inflight_entry.next)), typeof(void))))) __compiletime_assert_1922(); } while (0); ((typeof(*(req)) *)(__mptr - __builtin_offsetof(typeof(*(req)), inflight_entry))); })) { + if (req->work.files != files) + continue; + + if (!refcount_inc_not_zero(&req->refs)) + continue; + cancel_req = req; + break; + } + if (cancel_req) + prepare_to_wait(&ctx->inflight_wait, &wait, + 0x0002); + spin_unlock_irq(&ctx->inflight_lock); + + + if (!cancel_req) + break; + + if (cancel_req->flags & REQ_F_OVERFLOW) { + spin_lock_irq(&ctx->completion_lock); + list_del(&cancel_req->list); + cancel_req->flags &= ~REQ_F_OVERFLOW; + if (list_empty(&ctx->cq_overflow_list)) { + clear_bit(0, &ctx->sq_check_overflow); + clear_bit(0, &ctx->cq_check_overflow); + } + spin_unlock_irq(&ctx->completion_lock); + + do { do { extern void __compiletime_assert_1923(void) __attribute__((__error__("Unsupported access size for {READ,WRITE}_ONCE()."))); if (!((sizeof(ctx->rings->cq_overflow) == sizeof(char) || sizeof(ctx->rings->cq_overflow) == sizeof(short) || sizeof(ctx->rings->cq_overflow) == sizeof(int) || sizeof(ctx->rings->cq_overflow) == sizeof(long)) || sizeof(ctx->rings->cq_overflow) == sizeof(long long))) __compiletime_assert_1923(); } while (0); do { *(volatile typeof(ctx->rings->cq_overflow) *)&(ctx->rings->cq_overflow) = (atomic_inc_return(&ctx->cached_cq_overflow)); } while (0); } while (0) + ; + + + + + + if (refcount_sub_and_test(2, &cancel_req->refs)) { + io_free_req(cancel_req); + finish_wait(&ctx->inflight_wait, &wait); + continue; + } + } else { + io_wq_cancel_work(ctx->io_wq, &cancel_req->work); + io_put_req(cancel_req); + } + + schedule(); + finish_wait(&ctx->inflight_wait, &wait); + } +} + +static bool io_cancel_task_cb(struct io_wq_work *work, void *data) +{ + struct io_kiocb *req = ({ void *__mptr = (void *)(work); do { extern void __compiletime_assert_1924(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(work)), typeof(((struct io_kiocb *)0)->work)) && !__builtin_types_compatible_p(typeof(*(work)), typeof(void))))) __compiletime_assert_1924(); } while (0); ((struct io_kiocb *)(__mptr - __builtin_offsetof(struct io_kiocb, work))); }); + struct task_struct *task = data; + + return req->task == task; +} + +static int io_uring_flush(struct file *file, void *data) +{ + struct io_ring_ctx *ctx = file->private_data; + + io_uring_cancel_files(ctx, data); + + + + + if (fatal_signal_pending(get_current()) || (get_current()->flags & 0x00000004)) + io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, get_current(), true); + + return 0; +} + +static void *io_uring_validate_mmap_request(struct file *file, + loff_t pgoff, size_t sz) +{ + struct io_ring_ctx *ctx = file->private_data; + loff_t offset = pgoff << 12; + struct page *page; + void *ptr; + + switch (offset) { + case 0ULL: + case 0x8000000ULL: + ptr = ctx->rings; + break; + 
case 0x10000000ULL: + ptr = ctx->sq_sqes; + break; + default: + return ERR_PTR(-22); + } + + page = virt_to_head_page(ptr); + if (sz > page_size(page)) + return ERR_PTR(-22); + + return ptr; +} + + + +static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) +{ + size_t sz = vma->vm_end - vma->vm_start; + unsigned long pfn; + void *ptr; + + ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + pfn = virt_to_phys(ptr) >> 12; + return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); +} +# 7932 "fs/io_uring.c" +static const char *types__io_uring_enter[] = { "unsigned int", "u32", "u32", "u32", "const sigset_t *", "size_t" }; static const char *args__io_uring_enter[] = { "fd", "to_submit", "min_complete", "flags", "sig", "sigsz" }; static struct syscall_metadata __syscall_meta__io_uring_enter; static struct trace_event_call __attribute__((__used__)) event_enter__io_uring_enter = { .class = &event_class_syscall_enter, { .name = "sys_enter""_io_uring_enter", }, .event.funcs = &enter_syscall_print_funcs, .data = (void *)&__syscall_meta__io_uring_enter, .flags = TRACE_EVENT_FL_CAP_ANY, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_enter__io_uring_enter = &event_enter__io_uring_enter;; static struct syscall_metadata __syscall_meta__io_uring_enter; static struct trace_event_call __attribute__((__used__)) event_exit__io_uring_enter = { .class = &event_class_syscall_exit, { .name = "sys_exit""_io_uring_enter", }, .event.funcs = &exit_syscall_print_funcs, .data = (void *)&__syscall_meta__io_uring_enter, .flags = TRACE_EVENT_FL_CAP_ANY, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_exit__io_uring_enter = &event_exit__io_uring_enter;; static struct syscall_metadata __attribute__((__used__)) __syscall_meta__io_uring_enter = { .name = "sys""_io_uring_enter", .syscall_nr = -1, .nb_args = 6, .types = 6 ? types__io_uring_enter : ((void *)0), .args = 6 ? 
args__io_uring_enter : ((void *)0), .enter_event = &event_enter__io_uring_enter, .exit_event = &event_exit__io_uring_enter, .enter_fields = { &(__syscall_meta__io_uring_enter.enter_fields), &(__syscall_meta__io_uring_enter.enter_fields) }, }; static struct syscall_metadata __attribute__((__used__)) __attribute__((section("__syscalls_metadata"))) *__p_syscall_meta__io_uring_enter = &__syscall_meta__io_uring_enter; static long __se_sys_io_uring_enter(__typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))), 0LL, 0L)) fd, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))), 0LL, 0L)) to_submit, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))), 0LL, 0L)) min_complete, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))), 0LL, 0L)) flags, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( const sigset_t *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( const sigset_t *)0), typeof(0ULL))), 0LL, 0L)) sig, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( size_t)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( size_t)0), typeof(0ULL))), 0LL, 0L)) sigsz); static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long __do_sys_io_uring_enter(unsigned int fd, u32 to_submit, u32 min_complete, u32 flags, const sigset_t * sig, size_t sigsz); long __x64_sys_io_uring_enter(const struct pt_regs *regs); static struct error_injection_entry __attribute__((__used__)) __attribute__((__section__("_error_injection_whitelist"))) _eil_addr___x64_sys_io_uring_enter = { .addr = (unsigned long)__x64_sys_io_uring_enter, .etype = EI_ETYPE_ERRNO, };; long __x64_sys_io_uring_enter(const struct pt_regs *regs) { return __se_sys_io_uring_enter(regs->di, regs->si, regs->dx, regs->r10, regs->r8, regs->r9); } long __ia32_sys_io_uring_enter(const struct pt_regs *regs); static struct error_injection_entry __attribute__((__used__)) __attribute__((__section__("_error_injection_whitelist"))) _eil_addr___ia32_sys_io_uring_enter = { .addr = (unsigned long)__ia32_sys_io_uring_enter, .etype = EI_ETYPE_ERRNO, };; long __ia32_sys_io_uring_enter(const struct pt_regs *regs) { return __se_sys_io_uring_enter((unsigned int)regs->bx, (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->si, (unsigned int)regs->di, (unsigned int)regs->bp); } static long __se_sys_io_uring_enter(__typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))), 0LL, 0L)) fd, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))), 0LL, 0L)) to_submit, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))), 0LL, 0L)) min_complete, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))), 0LL, 0L)) flags, 
__typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( const sigset_t *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( const sigset_t *)0), typeof(0ULL))), 0LL, 0L)) sig, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( size_t)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( size_t)0), typeof(0ULL))), 0LL, 0L)) sigsz) { long ret = __do_sys_io_uring_enter(( unsigned int) fd, ( u32) to_submit, ( u32) min_complete, ( u32) flags, ( const sigset_t *) sig, ( size_t) sigsz); (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))) && sizeof(unsigned int) > sizeof(long))); }))), (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))) && sizeof(u32) > sizeof(long))); }))), (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))) && sizeof(u32) > sizeof(long))); }))), (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))) && sizeof(u32) > sizeof(long))); }))), (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( const sigset_t *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( const sigset_t *)0), typeof(0ULL))) && sizeof(const sigset_t *) > sizeof(long))); }))), (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( size_t)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( size_t)0), typeof(0ULL))) && sizeof(size_t) > sizeof(long))); }))); do { } while (0); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long __do_sys_io_uring_enter(unsigned int fd, u32 to_submit, u32 min_complete, u32 flags, const sigset_t * sig, size_t sigsz) + + +{ + struct io_ring_ctx *ctx; + long ret = -9; + int submitted = 0; + struct fd f; + + io_run_task_work(); + + if (flags & ~((1U << 0) | (1U << 1))) + return -22; + + f = fdget(fd); + if (!f.file) + return -9; + + ret = -95; + if (f.file->f_op != &io_uring_fops) + goto out_fput; + + ret = -6; + ctx = f.file->private_data; + if (!percpu_ref_tryget(&ctx->refs)) + goto out_fput; + + + + + + + ret = 0; + if (ctx->flags & (1U << 1)) { + if (!list_empty_careful(&ctx->cq_overflow_list)) + io_cqring_overflow_flush(ctx, false); + if (flags & (1U << 1)) + __wake_up(&ctx->sqo_wait, (0x0001 | 0x0002), 1, ((void *)0)); + submitted = to_submit; + } else if (to_submit) { + mutex_lock_nested(&ctx->uring_lock, 0); + submitted = io_submit_sqes(ctx, to_submit, f.file, fd); + mutex_unlock(&ctx->uring_lock); + + if (submitted != to_submit) + goto out; + } + if (flags & (1U << 0)) { + min_complete = __builtin_choose_expr(((!!(sizeof((typeof(min_complete) *)1 == (typeof(ctx->cq_entries) *)1))) && ((sizeof(int) == sizeof(*(8 ? ((void *)((long)(min_complete) * 0l)) : (int *)8))) && (sizeof(int) == sizeof(*(8 ? ((void *)((long)(ctx->cq_entries) * 0l)) : (int *)8))))), ((min_complete) < (ctx->cq_entries) ? (min_complete) : (ctx->cq_entries)), ({ typeof(min_complete) __UNIQUE_ID___x1925 = (min_complete); typeof(ctx->cq_entries) __UNIQUE_ID___y1926 = (ctx->cq_entries); ((__UNIQUE_ID___x1925) < (__UNIQUE_ID___y1926) ? 
(__UNIQUE_ID___x1925) : (__UNIQUE_ID___y1926)); })); + + + + + + + + if (ctx->flags & (1U << 0) && + !(ctx->flags & (1U << 1))) { + ret = io_iopoll_check(ctx, min_complete); + } else { + ret = io_cqring_wait(ctx, min_complete, sig, sigsz); + } + } + +out: + percpu_ref_put(&ctx->refs); +out_fput: + fdput(f); + return submitted ? submitted : ret; +} + + +static int io_uring_show_cred(int id, void *p, void *data) +{ + const struct cred *cred = p; + struct seq_file *m = data; + struct user_namespace *uns = seq_user_ns(m); + struct group_info *gi; + kernel_cap_t cap; + unsigned __capi; + int g; + + seq_printf(m, "%5d\n", id); + seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid)); + seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid)); + seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid)); + seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid)); + seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid)); + seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid)); + seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid)); + seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid)); + seq_puts(m, "\n\tGroups:\t"); + gi = cred->group_info; + for (g = 0; g < gi->ngroups; g++) { + seq_put_decimal_ull(m, g ? " " : "", + from_kgid_munged(uns, gi->gid[g])); + } + seq_puts(m, "\n\tCapEff:\t"); + cap = cred->cap_effective; + for (__capi = 0; __capi < 2; ++__capi) + seq_put_hex_ll(m, ((void *)0), cap.cap[((2) - 1) - __capi], 8); + seq_putc(m, '\n'); + return 0; +} + +static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) +{ + int i; + + mutex_lock_nested(&ctx->uring_lock, 0); + seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files); + for (i = 0; i < ctx->nr_user_files; i++) { + struct fixed_file_table *table; + struct file *f; + + table = &ctx->file_data->table[i >> 9]; + f = table->files[i & ((1U << 9) - 1)]; + if (f) + seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname); + else + seq_printf(m, "%5u: \n", i); + } + seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs); + for (i = 0; i < ctx->nr_user_bufs; i++) { + struct io_mapped_ubuf *buf = &ctx->user_bufs[i]; + + seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, + (unsigned int) buf->len); + } + if (!idr_is_empty(&ctx->personality_idr)) { + seq_printf(m, "Personalities:\n"); + idr_for_each(&ctx->personality_idr, io_uring_show_cred, m); + } + seq_printf(m, "PollList:\n"); + spin_lock_irq(&ctx->completion_lock); + for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { + struct hlist_head *list = &ctx->cancel_hash[i]; + struct io_kiocb *req; + + for (req = ({ typeof((list)->first) ____ptr = ((list)->first); ____ptr ? ({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1927(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(req)) *)0)->hash_node)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1927(); } while (0); ((typeof(*(req)) *)(__mptr - __builtin_offsetof(typeof(*(req)), hash_node))); }) : ((void *)0); }); req; req = ({ typeof((req)->hash_node.next) ____ptr = ((req)->hash_node.next); ____ptr ? 
({ void *__mptr = (void *)(____ptr); do { extern void __compiletime_assert_1928(void) __attribute__((__error__("pointer type mismatch in container_of()"))); if (!(!(!__builtin_types_compatible_p(typeof(*(____ptr)), typeof(((typeof(*(req)) *)0)->hash_node)) && !__builtin_types_compatible_p(typeof(*(____ptr)), typeof(void))))) __compiletime_assert_1928(); } while (0); ((typeof(*(req)) *)(__mptr - __builtin_offsetof(typeof(*(req)), hash_node))); }) : ((void *)0); })) + seq_printf(m, " op=%d, task_works=%d\n", req->opcode, + req->task->task_works != ((void *)0)); + } + spin_unlock_irq(&ctx->completion_lock); + mutex_unlock(&ctx->uring_lock); +} + +static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) +{ + struct io_ring_ctx *ctx = f->private_data; + + if (percpu_ref_tryget(&ctx->refs)) { + __io_uring_show_fdinfo(ctx, m); + percpu_ref_put(&ctx->refs); + } +} + + +static const struct file_operations io_uring_fops = { + .release = io_uring_release, + .flush = io_uring_flush, + .mmap = io_uring_mmap, + + + + + .poll = io_uring_poll, + .fasync = io_uring_fasync, + + .show_fdinfo = io_uring_show_fdinfo, + +}; + +static int io_allocate_scq_urings(struct io_ring_ctx *ctx, + struct io_uring_params *p) +{ + struct io_rings *rings; + size_t size, sq_array_offset; + + size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset); + if (size == (~(size_t)0)) + return -75; + + rings = io_mem_alloc(size); + if (!rings) + return -12; + + ctx->rings = rings; + ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); + rings->sq_ring_mask = p->sq_entries - 1; + rings->cq_ring_mask = p->cq_entries - 1; + rings->sq_ring_entries = p->sq_entries; + rings->cq_ring_entries = p->cq_entries; + ctx->sq_mask = rings->sq_ring_mask; + ctx->cq_mask = rings->cq_ring_mask; + ctx->sq_entries = rings->sq_ring_entries; + ctx->cq_entries = rings->cq_ring_entries; + + size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); + if (size == (~(size_t)0)) { + io_mem_free(ctx->rings); + ctx->rings = ((void *)0); + return -75; + } + + ctx->sq_sqes = io_mem_alloc(size); + if (!ctx->sq_sqes) { + io_mem_free(ctx->rings); + ctx->rings = ((void *)0); + return -12; + } + + return 0; +} + + + + + + + +static int io_uring_get_fd(struct io_ring_ctx *ctx) +{ + struct file *file; + int ret; + + + ret = sock_create_kern(&init_net, 1, SOCK_RAW, IPPROTO_IP, + &ctx->ring_sock); + if (ret) + return ret; + + + ret = get_unused_fd_flags(00000002 | 02000000); + if (ret < 0) + goto err; + + file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx, + 00000002 | 02000000); + if (IS_ERR(file)) { + put_unused_fd(ret); + ret = PTR_ERR(file); + goto err; + } + + + ctx->ring_sock->file = file; + + fd_install(ret, file); + return ret; +err: + + sock_release(ctx->ring_sock); + ctx->ring_sock = ((void *)0); + + return ret; +} + +static int io_uring_create(unsigned entries, struct io_uring_params *p, + struct io_uring_params *params) +{ + struct user_struct *user = ((void *)0); + struct io_ring_ctx *ctx; + bool limit_mem; + int ret; + + if (!entries) + return -22; + if (entries > 32768) { + if (!(p->flags & (1U << 4))) + return -22; + entries = 32768; + } +# 8214 "fs/io_uring.c" + p->sq_entries = ( __builtin_constant_p(entries) ? ( (entries == 1) ? 1 : (1UL << (( __builtin_constant_p((entries) - 1) ? ( __builtin_constant_p((entries) - 1) ? ( ((entries) - 1) < 2 ? 0 : ((entries) - 1) & (1ULL << 63) ? 63 : ((entries) - 1) & (1ULL << 62) ? 62 : ((entries) - 1) & (1ULL << 61) ? 61 : ((entries) - 1) & (1ULL << 60) ? 
60 : ((entries) - 1) & (1ULL << 59) ? 59 : ((entries) - 1) & (1ULL << 58) ? 58 : ((entries) - 1) & (1ULL << 57) ? 57 : ((entries) - 1) & (1ULL << 56) ? 56 : ((entries) - 1) & (1ULL << 55) ? 55 : ((entries) - 1) & (1ULL << 54) ? 54 : ((entries) - 1) & (1ULL << 53) ? 53 : ((entries) - 1) & (1ULL << 52) ? 52 : ((entries) - 1) & (1ULL << 51) ? 51 : ((entries) - 1) & (1ULL << 50) ? 50 : ((entries) - 1) & (1ULL << 49) ? 49 : ((entries) - 1) & (1ULL << 48) ? 48 : ((entries) - 1) & (1ULL << 47) ? 47 : ((entries) - 1) & (1ULL << 46) ? 46 : ((entries) - 1) & (1ULL << 45) ? 45 : ((entries) - 1) & (1ULL << 44) ? 44 : ((entries) - 1) & (1ULL << 43) ? 43 : ((entries) - 1) & (1ULL << 42) ? 42 : ((entries) - 1) & (1ULL << 41) ? 41 : ((entries) - 1) & (1ULL << 40) ? 40 : ((entries) - 1) & (1ULL << 39) ? 39 : ((entries) - 1) & (1ULL << 38) ? 38 : ((entries) - 1) & (1ULL << 37) ? 37 : ((entries) - 1) & (1ULL << 36) ? 36 : ((entries) - 1) & (1ULL << 35) ? 35 : ((entries) - 1) & (1ULL << 34) ? 34 : ((entries) - 1) & (1ULL << 33) ? 33 : ((entries) - 1) & (1ULL << 32) ? 32 : ((entries) - 1) & (1ULL << 31) ? 31 : ((entries) - 1) & (1ULL << 30) ? 30 : ((entries) - 1) & (1ULL << 29) ? 29 : ((entries) - 1) & (1ULL << 28) ? 28 : ((entries) - 1) & (1ULL << 27) ? 27 : ((entries) - 1) & (1ULL << 26) ? 26 : ((entries) - 1) & (1ULL << 25) ? 25 : ((entries) - 1) & (1ULL << 24) ? 24 : ((entries) - 1) & (1ULL << 23) ? 23 : ((entries) - 1) & (1ULL << 22) ? 22 : ((entries) - 1) & (1ULL << 21) ? 21 : ((entries) - 1) & (1ULL << 20) ? 20 : ((entries) - 1) & (1ULL << 19) ? 19 : ((entries) - 1) & (1ULL << 18) ? 18 : ((entries) - 1) & (1ULL << 17) ? 17 : ((entries) - 1) & (1ULL << 16) ? 16 : ((entries) - 1) & (1ULL << 15) ? 15 : ((entries) - 1) & (1ULL << 14) ? 14 : ((entries) - 1) & (1ULL << 13) ? 13 : ((entries) - 1) & (1ULL << 12) ? 12 : ((entries) - 1) & (1ULL << 11) ? 11 : ((entries) - 1) & (1ULL << 10) ? 10 : ((entries) - 1) & (1ULL << 9) ? 9 : ((entries) - 1) & (1ULL << 8) ? 8 : ((entries) - 1) & (1ULL << 7) ? 7 : ((entries) - 1) & (1ULL << 6) ? 6 : ((entries) - 1) & (1ULL << 5) ? 5 : ((entries) - 1) & (1ULL << 4) ? 4 : ((entries) - 1) & (1ULL << 3) ? 3 : ((entries) - 1) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof((entries) - 1) <= 4) ? __ilog2_u32((entries) - 1) : __ilog2_u64((entries) - 1) ) + 1)) ) : __roundup_pow_of_two(entries) ); + if (p->flags & (1U << 3)) { + + + + + + if (p->cq_entries < p->sq_entries) + return -22; + if (p->cq_entries > (2 * 32768)) { + if (!(p->flags & (1U << 4))) + return -22; + p->cq_entries = (2 * 32768); + } + p->cq_entries = ( __builtin_constant_p(p->cq_entries) ? ( (p->cq_entries == 1) ? 1 : (1UL << (( __builtin_constant_p((p->cq_entries) - 1) ? ( __builtin_constant_p((p->cq_entries) - 1) ? ( ((p->cq_entries) - 1) < 2 ? 0 : ((p->cq_entries) - 1) & (1ULL << 63) ? 63 : ((p->cq_entries) - 1) & (1ULL << 62) ? 62 : ((p->cq_entries) - 1) & (1ULL << 61) ? 61 : ((p->cq_entries) - 1) & (1ULL << 60) ? 60 : ((p->cq_entries) - 1) & (1ULL << 59) ? 59 : ((p->cq_entries) - 1) & (1ULL << 58) ? 58 : ((p->cq_entries) - 1) & (1ULL << 57) ? 57 : ((p->cq_entries) - 1) & (1ULL << 56) ? 56 : ((p->cq_entries) - 1) & (1ULL << 55) ? 55 : ((p->cq_entries) - 1) & (1ULL << 54) ? 54 : ((p->cq_entries) - 1) & (1ULL << 53) ? 53 : ((p->cq_entries) - 1) & (1ULL << 52) ? 52 : ((p->cq_entries) - 1) & (1ULL << 51) ? 51 : ((p->cq_entries) - 1) & (1ULL << 50) ? 50 : ((p->cq_entries) - 1) & (1ULL << 49) ? 49 : ((p->cq_entries) - 1) & (1ULL << 48) ? 48 : ((p->cq_entries) - 1) & (1ULL << 47) ? 47 : ((p->cq_entries) - 1) & (1ULL << 46) ? 
46 : ((p->cq_entries) - 1) & (1ULL << 45) ? 45 : ((p->cq_entries) - 1) & (1ULL << 44) ? 44 : ((p->cq_entries) - 1) & (1ULL << 43) ? 43 : ((p->cq_entries) - 1) & (1ULL << 42) ? 42 : ((p->cq_entries) - 1) & (1ULL << 41) ? 41 : ((p->cq_entries) - 1) & (1ULL << 40) ? 40 : ((p->cq_entries) - 1) & (1ULL << 39) ? 39 : ((p->cq_entries) - 1) & (1ULL << 38) ? 38 : ((p->cq_entries) - 1) & (1ULL << 37) ? 37 : ((p->cq_entries) - 1) & (1ULL << 36) ? 36 : ((p->cq_entries) - 1) & (1ULL << 35) ? 35 : ((p->cq_entries) - 1) & (1ULL << 34) ? 34 : ((p->cq_entries) - 1) & (1ULL << 33) ? 33 : ((p->cq_entries) - 1) & (1ULL << 32) ? 32 : ((p->cq_entries) - 1) & (1ULL << 31) ? 31 : ((p->cq_entries) - 1) & (1ULL << 30) ? 30 : ((p->cq_entries) - 1) & (1ULL << 29) ? 29 : ((p->cq_entries) - 1) & (1ULL << 28) ? 28 : ((p->cq_entries) - 1) & (1ULL << 27) ? 27 : ((p->cq_entries) - 1) & (1ULL << 26) ? 26 : ((p->cq_entries) - 1) & (1ULL << 25) ? 25 : ((p->cq_entries) - 1) & (1ULL << 24) ? 24 : ((p->cq_entries) - 1) & (1ULL << 23) ? 23 : ((p->cq_entries) - 1) & (1ULL << 22) ? 22 : ((p->cq_entries) - 1) & (1ULL << 21) ? 21 : ((p->cq_entries) - 1) & (1ULL << 20) ? 20 : ((p->cq_entries) - 1) & (1ULL << 19) ? 19 : ((p->cq_entries) - 1) & (1ULL << 18) ? 18 : ((p->cq_entries) - 1) & (1ULL << 17) ? 17 : ((p->cq_entries) - 1) & (1ULL << 16) ? 16 : ((p->cq_entries) - 1) & (1ULL << 15) ? 15 : ((p->cq_entries) - 1) & (1ULL << 14) ? 14 : ((p->cq_entries) - 1) & (1ULL << 13) ? 13 : ((p->cq_entries) - 1) & (1ULL << 12) ? 12 : ((p->cq_entries) - 1) & (1ULL << 11) ? 11 : ((p->cq_entries) - 1) & (1ULL << 10) ? 10 : ((p->cq_entries) - 1) & (1ULL << 9) ? 9 : ((p->cq_entries) - 1) & (1ULL << 8) ? 8 : ((p->cq_entries) - 1) & (1ULL << 7) ? 7 : ((p->cq_entries) - 1) & (1ULL << 6) ? 6 : ((p->cq_entries) - 1) & (1ULL << 5) ? 5 : ((p->cq_entries) - 1) & (1ULL << 4) ? 4 : ((p->cq_entries) - 1) & (1ULL << 3) ? 3 : ((p->cq_entries) - 1) & (1ULL << 2) ? 2 : 1) : -1) : (sizeof((p->cq_entries) - 1) <= 4) ? 
__ilog2_u32((p->cq_entries) - 1) : __ilog2_u64((p->cq_entries) - 1) ) + 1)) ) : __roundup_pow_of_two(p->cq_entries) ); + } else { + p->cq_entries = 2 * p->sq_entries; + } + + user = get_uid((({ ({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("fs/io_uring.c", 8233, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); })->user; }))); + limit_mem = !capable(14); + + if (limit_mem) { + ret = __io_account_mem(user, + ring_pages(p->sq_entries, p->cq_entries)); + if (ret) { + free_uid(user); + return ret; + } + } + + ctx = io_ring_ctx_alloc(p); + if (!ctx) { + if (limit_mem) + __io_unaccount_mem(user, ring_pages(p->sq_entries, + p->cq_entries)); + free_uid(user); + return -12; + } + ctx->compat = in_compat_syscall(); + ctx->user = user; + ctx->creds = (get_cred(({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("fs/io_uring.c", 8255, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); }))); + + ret = io_allocate_scq_urings(ctx, p); + if (ret) + goto err; + + ret = io_sq_offload_start(ctx, p); + if (ret) + goto err; + + memset(&p->sq_off, 0, sizeof(p->sq_off)); + p->sq_off.head = __builtin_offsetof(struct io_rings, sq.head); + p->sq_off.tail = __builtin_offsetof(struct io_rings, sq.tail); + p->sq_off.ring_mask = __builtin_offsetof(struct io_rings, sq_ring_mask); + p->sq_off.ring_entries = __builtin_offsetof(struct io_rings, sq_ring_entries); + p->sq_off.flags = __builtin_offsetof(struct io_rings, sq_flags); + p->sq_off.dropped = __builtin_offsetof(struct io_rings, sq_dropped); + p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; + + memset(&p->cq_off, 0, sizeof(p->cq_off)); + p->cq_off.head = __builtin_offsetof(struct io_rings, cq.head); + p->cq_off.tail = __builtin_offsetof(struct io_rings, cq.tail); + p->cq_off.ring_mask = __builtin_offsetof(struct io_rings, cq_ring_mask); + p->cq_off.ring_entries = __builtin_offsetof(struct io_rings, cq_ring_entries); + p->cq_off.overflow = __builtin_offsetof(struct io_rings, cq_overflow); + p->cq_off.cqes = __builtin_offsetof(struct io_rings, cqes); + p->cq_off.flags = __builtin_offsetof(struct io_rings, cq_flags); + + p->features = (1U << 0) | (1U << 1) | + (1U << 2) | (1U << 3) | + (1U << 4) | (1U << 5) | + (1U << 6); + + if (copy_to_user(params, p, sizeof(*p))) { + ret = -14; + goto err; + } + + + + + ret = io_uring_get_fd(ctx); + if (ret < 0) + goto err; + + trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); + io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries), + ACCT_LOCKED); + ctx->limit_mem = limit_mem; + return ret; +err: + io_ring_ctx_wait_and_kill(ctx); + return ret; +} + + + + + + +static long io_uring_setup(u32 entries, struct io_uring_params *params) +{ + struct io_uring_params p; + int i; + + if (copy_from_user(&p, params, sizeof(p))) + return -14; + for (i = 0; i < (sizeof(p.resv) / sizeof((p.resv)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((p.resv)), typeof(&(p.resv)[0])))); })))); i++) { + if (p.resv[i]) + return -22; + } + + if (p.flags & ~((1U << 0) | (1U << 1) | + (1U << 2) | (1U << 3) | + (1U << 4) | (1U << 5))) + return -22; + + return io_uring_create(entries, &p, 
params); +} + +static const char *types__io_uring_setup[] = { "u32", "struct io_uring_params *" }; static const char *args__io_uring_setup[] = { "entries", "params" }; static struct syscall_metadata __syscall_meta__io_uring_setup; static struct trace_event_call __attribute__((__used__)) event_enter__io_uring_setup = { .class = &event_class_syscall_enter, { .name = "sys_enter""_io_uring_setup", }, .event.funcs = &enter_syscall_print_funcs, .data = (void *)&__syscall_meta__io_uring_setup, .flags = TRACE_EVENT_FL_CAP_ANY, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_enter__io_uring_setup = &event_enter__io_uring_setup;; static struct syscall_metadata __syscall_meta__io_uring_setup; static struct trace_event_call __attribute__((__used__)) event_exit__io_uring_setup = { .class = &event_class_syscall_exit, { .name = "sys_exit""_io_uring_setup", }, .event.funcs = &exit_syscall_print_funcs, .data = (void *)&__syscall_meta__io_uring_setup, .flags = TRACE_EVENT_FL_CAP_ANY, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_exit__io_uring_setup = &event_exit__io_uring_setup;; static struct syscall_metadata __attribute__((__used__)) __syscall_meta__io_uring_setup = { .name = "sys""_io_uring_setup", .syscall_nr = -1, .nb_args = 2, .types = 2 ? types__io_uring_setup : ((void *)0), .args = 2 ? args__io_uring_setup : ((void *)0), .enter_event = &event_enter__io_uring_setup, .exit_event = &event_exit__io_uring_setup, .enter_fields = { &(__syscall_meta__io_uring_setup.enter_fields), &(__syscall_meta__io_uring_setup.enter_fields) }, }; static struct syscall_metadata __attribute__((__used__)) __attribute__((section("__syscalls_metadata"))) *__p_syscall_meta__io_uring_setup = &__syscall_meta__io_uring_setup; static long __se_sys_io_uring_setup(__typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))), 0LL, 0L)) entries, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( struct io_uring_params *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( struct io_uring_params *)0), typeof(0ULL))), 0LL, 0L)) params); static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long __do_sys_io_uring_setup(u32 entries, struct io_uring_params * params); long __x64_sys_io_uring_setup(const struct pt_regs *regs); static struct error_injection_entry __attribute__((__used__)) __attribute__((__section__("_error_injection_whitelist"))) _eil_addr___x64_sys_io_uring_setup = { .addr = (unsigned long)__x64_sys_io_uring_setup, .etype = EI_ETYPE_ERRNO, };; long __x64_sys_io_uring_setup(const struct pt_regs *regs) { return __se_sys_io_uring_setup(regs->di, regs->si); } long __ia32_sys_io_uring_setup(const struct pt_regs *regs); static struct error_injection_entry __attribute__((__used__)) __attribute__((__section__("_error_injection_whitelist"))) _eil_addr___ia32_sys_io_uring_setup = { .addr = (unsigned long)__ia32_sys_io_uring_setup, .etype = EI_ETYPE_ERRNO, };; long __ia32_sys_io_uring_setup(const struct pt_regs *regs) { return __se_sys_io_uring_setup((unsigned int)regs->bx, (unsigned int)regs->cx); } static long __se_sys_io_uring_setup(__typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))), 0LL, 0L)) entries, 
__typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( struct io_uring_params *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( struct io_uring_params *)0), typeof(0ULL))), 0LL, 0L)) params) { long ret = __do_sys_io_uring_setup(( u32) entries, ( struct io_uring_params *) params); (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( u32)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( u32)0), typeof(0ULL))) && sizeof(u32) > sizeof(long))); }))), (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( struct io_uring_params *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( struct io_uring_params *)0), typeof(0ULL))) && sizeof(struct io_uring_params *) > sizeof(long))); }))); do { } while (0); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long __do_sys_io_uring_setup(u32 entries, struct io_uring_params * params) + +{ + return io_uring_setup(entries, params); +} + +static int io_probe(struct io_ring_ctx *ctx, void *arg, unsigned nr_args) +{ + struct io_uring_probe *p; + size_t size; + int i, ret; + + size = __ab_c_size(nr_args, sizeof(*(p)->ops) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof(((p)->ops)), typeof(&((p)->ops)[0])))); }))), sizeof(*(p))); + if (size == (~(size_t)0)) + return -75; + p = kzalloc(size, ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (!p) + return -12; + + ret = -14; + if (copy_from_user(p, arg, size)) + goto out; + ret = -22; + if (memchr_inv(p, 0, size)) + goto out; + + p->last_op = IORING_OP_LAST - 1; + if (nr_args > IORING_OP_LAST) + nr_args = IORING_OP_LAST; + + for (i = 0; i < nr_args; i++) { + p->ops[i].op = i; + if (!io_op_defs[i].not_supported) + p->ops[i].flags = (1U << 0); + } + p->ops_len = i; + + ret = 0; + if (copy_to_user(arg, p, size)) + ret = -14; +out: + kfree(p); + return ret; +} + +static int io_register_personality(struct io_ring_ctx *ctx) +{ + const struct cred *creds = (get_cred(({ do { static bool __attribute__((__section__(".data.unlikely"))) __warned; if (debug_lockdep_rcu_enabled() && !__warned && (!((1)))) { __warned = true; lockdep_rcu_suspicious("fs/io_uring.c", 8382, "suspicious rcu_dereference_protected() usage"); } } while (0); ; ((typeof(*(get_current()->cred)) *)((get_current()->cred))); }))); + int id; + + id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1, + ((unsigned short)~0U), ((( gfp_t)(0x400u|0x800u)) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); + if (id < 0) + put_cred(creds); + return id; +} + +static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) +{ + const struct cred *old_creds; + + old_creds = idr_remove(&ctx->personality_idr, id); + if (old_creds) { + put_cred(old_creds); + return 0; + } + + return -22; +} + +static bool io_register_op_must_quiesce(int op) +{ + switch (op) { + case 3: + case 6: + case 8: + case 9: + case 10: + return false; + default: + return true; + } +} + +static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, + void *arg, unsigned nr_args) + + +{ + int ret; + + + + + + + if (percpu_ref_is_dying(&ctx->refs)) + return -6; + + if (io_register_op_must_quiesce(opcode)) { + percpu_ref_kill(&ctx->refs); +# 8445 "fs/io_uring.c" + mutex_unlock(&ctx->uring_lock); + ret = wait_for_completion_interruptible(&ctx->ref_comp); + mutex_lock_nested(&ctx->uring_lock, 0); + if (ret) { + percpu_ref_resurrect(&ctx->refs); + ret = -4; + goto out; + } + } + + 
switch (opcode) { + case 0: + ret = io_sqe_buffer_register(ctx, arg, nr_args); + break; + case 1: + ret = -22; + if (arg || nr_args) + break; + ret = io_sqe_buffer_unregister(ctx); + break; + case 2: + ret = io_sqe_files_register(ctx, arg, nr_args); + break; + case 3: + ret = -22; + if (arg || nr_args) + break; + ret = io_sqe_files_unregister(ctx); + break; + case 6: + ret = io_sqe_files_update(ctx, arg, nr_args); + break; + case 4: + case 7: + ret = -22; + if (nr_args != 1) + break; + ret = io_eventfd_register(ctx, arg); + if (ret) + break; + if (opcode == 7) + ctx->eventfd_async = 1; + else + ctx->eventfd_async = 0; + break; + case 5: + ret = -22; + if (arg || nr_args) + break; + ret = io_eventfd_unregister(ctx); + break; + case 8: + ret = -22; + if (!arg || nr_args > 256) + break; + ret = io_probe(ctx, arg, nr_args); + break; + case 9: + ret = -22; + if (arg || nr_args) + break; + ret = io_register_personality(ctx); + break; + case 10: + ret = -22; + if (arg) + break; + ret = io_unregister_personality(ctx, nr_args); + break; + default: + ret = -22; + break; + } + + if (io_register_op_must_quiesce(opcode)) { + + percpu_ref_reinit(&ctx->refs); +out: + reinit_completion(&ctx->ref_comp); + } + return ret; +} + +static const char *types__io_uring_register[] = { "unsigned int", "unsigned int", "void *", "unsigned int" }; static const char *args__io_uring_register[] = { "fd", "opcode", "arg", "nr_args" }; static struct syscall_metadata __syscall_meta__io_uring_register; static struct trace_event_call __attribute__((__used__)) event_enter__io_uring_register = { .class = &event_class_syscall_enter, { .name = "sys_enter""_io_uring_register", }, .event.funcs = &enter_syscall_print_funcs, .data = (void *)&__syscall_meta__io_uring_register, .flags = TRACE_EVENT_FL_CAP_ANY, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_enter__io_uring_register = &event_enter__io_uring_register;; static struct syscall_metadata __syscall_meta__io_uring_register; static struct trace_event_call __attribute__((__used__)) event_exit__io_uring_register = { .class = &event_class_syscall_exit, { .name = "sys_exit""_io_uring_register", }, .event.funcs = &exit_syscall_print_funcs, .data = (void *)&__syscall_meta__io_uring_register, .flags = TRACE_EVENT_FL_CAP_ANY, }; static struct trace_event_call __attribute__((__used__)) __attribute__((section("_ftrace_events"))) *__event_exit__io_uring_register = &event_exit__io_uring_register;; static struct syscall_metadata __attribute__((__used__)) __syscall_meta__io_uring_register = { .name = "sys""_io_uring_register", .syscall_nr = -1, .nb_args = 4, .types = 4 ? types__io_uring_register : ((void *)0), .args = 4 ? 
args__io_uring_register : ((void *)0), .enter_event = &event_enter__io_uring_register, .exit_event = &event_exit__io_uring_register, .enter_fields = { &(__syscall_meta__io_uring_register.enter_fields), &(__syscall_meta__io_uring_register.enter_fields) }, }; static struct syscall_metadata __attribute__((__used__)) __attribute__((section("__syscalls_metadata"))) *__p_syscall_meta__io_uring_register = &__syscall_meta__io_uring_register; static long __se_sys_io_uring_register(__typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))), 0LL, 0L)) fd, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))), 0LL, 0L)) opcode, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( void *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( void *)0), typeof(0ULL))), 0LL, 0L)) arg, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))), 0LL, 0L)) nr_args); static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long __do_sys_io_uring_register(unsigned int fd, unsigned int opcode, void * arg, unsigned int nr_args); long __x64_sys_io_uring_register(const struct pt_regs *regs); static struct error_injection_entry __attribute__((__used__)) __attribute__((__section__("_error_injection_whitelist"))) _eil_addr___x64_sys_io_uring_register = { .addr = (unsigned long)__x64_sys_io_uring_register, .etype = EI_ETYPE_ERRNO, };; long __x64_sys_io_uring_register(const struct pt_regs *regs) { return __se_sys_io_uring_register(regs->di, regs->si, regs->dx, regs->r10); } long __ia32_sys_io_uring_register(const struct pt_regs *regs); static struct error_injection_entry __attribute__((__used__)) __attribute__((__section__("_error_injection_whitelist"))) _eil_addr___ia32_sys_io_uring_register = { .addr = (unsigned long)__ia32_sys_io_uring_register, .etype = EI_ETYPE_ERRNO, };; long __ia32_sys_io_uring_register(const struct pt_regs *regs) { return __se_sys_io_uring_register((unsigned int)regs->bx, (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->si); } static long __se_sys_io_uring_register(__typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))), 0LL, 0L)) fd, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))), 0LL, 0L)) opcode, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( void *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( void *)0), typeof(0ULL))), 0LL, 0L)) arg, __typeof(__builtin_choose_expr((__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))), 0LL, 0L)) nr_args) { long ret = __do_sys_io_uring_register(( unsigned int) fd, ( unsigned int) opcode, ( void *) arg, ( unsigned int) nr_args); (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))) && sizeof(unsigned int) > sizeof(long))); }))), 
(void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))) && sizeof(unsigned int) > sizeof(long))); }))), (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( void *)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( void *)0), typeof(0ULL))) && sizeof(void *) > sizeof(long))); }))), (void)((int)(sizeof(struct { int:(-!!(!(__builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0LL)) || __builtin_types_compatible_p(typeof(( unsigned int)0), typeof(0ULL))) && sizeof(unsigned int) > sizeof(long))); }))); do { } while (0); return ret; } static inline __attribute__((__gnu_inline__)) __attribute__((__unused__)) __attribute__((no_instrument_function)) long __do_sys_io_uring_register(unsigned int fd, unsigned int opcode, void * arg, unsigned int nr_args) + +{ + struct io_ring_ctx *ctx; + long ret = -9; + struct fd f; + + f = fdget(fd); + if (!f.file) + return -9; + + ret = -95; + if (f.file->f_op != &io_uring_fops) + goto out_fput; + + ctx = f.file->private_data; + + mutex_lock_nested(&ctx->uring_lock, 0); + ret = __io_uring_register(ctx, opcode, arg, nr_args); + mutex_unlock(&ctx->uring_lock); + trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, + ctx->cq_ev_fd != ((void *)0), ret); +out_fput: + fdput(f); + return ret; +} + +static int __attribute__((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((__indirect_branch__("keep"))) io_uring_init(void) +{ + + + + + + + + do { extern void __compiletime_assert_1929(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(struct io_uring_sqe) != 64"))); if (!(!(sizeof(struct io_uring_sqe) != 64))) __compiletime_assert_1929(); } while (0); + do { do { extern void __compiletime_assert_1930(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, opcode) != 0"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, opcode) != 0))) __compiletime_assert_1930(); } while (0); do { extern void __compiletime_assert_1931(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u8) != sizeof_field(struct io_uring_sqe, opcode)"))); if (!(!(sizeof(__u8) != sizeof((((struct io_uring_sqe *)0)->opcode))))) __compiletime_assert_1931(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1932(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, flags) != 1"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, flags) != 1))) __compiletime_assert_1932(); } while (0); do { extern void __compiletime_assert_1933(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u8) != sizeof_field(struct io_uring_sqe, flags)"))); if (!(!(sizeof(__u8) != sizeof((((struct io_uring_sqe *)0)->flags))))) __compiletime_assert_1933(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1934(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, ioprio) != 2"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, ioprio) != 2))) __compiletime_assert_1934(); } while (0); do { extern void __compiletime_assert_1935(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u16) != sizeof_field(struct io_uring_sqe, ioprio)"))); if (!(!(sizeof(__u16) != sizeof((((struct io_uring_sqe *)0)->ioprio))))) __compiletime_assert_1935(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1936(void) __attribute__((__error__("BUILD_BUG_ON 
failed: " "offsetof(struct io_uring_sqe, fd) != 4"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, fd) != 4))) __compiletime_assert_1936(); } while (0); do { extern void __compiletime_assert_1937(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__s32) != sizeof_field(struct io_uring_sqe, fd)"))); if (!(!(sizeof(__s32) != sizeof((((struct io_uring_sqe *)0)->fd))))) __compiletime_assert_1937(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1938(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, off) != 8"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, off) != 8))) __compiletime_assert_1938(); } while (0); do { extern void __compiletime_assert_1939(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u64) != sizeof_field(struct io_uring_sqe, off)"))); if (!(!(sizeof(__u64) != sizeof((((struct io_uring_sqe *)0)->off))))) __compiletime_assert_1939(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1940(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, addr2) != 8"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, addr2) != 8))) __compiletime_assert_1940(); } while (0); do { extern void __compiletime_assert_1941(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u64) != sizeof_field(struct io_uring_sqe, addr2)"))); if (!(!(sizeof(__u64) != sizeof((((struct io_uring_sqe *)0)->addr2))))) __compiletime_assert_1941(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1942(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, addr) != 16"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, addr) != 16))) __compiletime_assert_1942(); } while (0); do { extern void __compiletime_assert_1943(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u64) != sizeof_field(struct io_uring_sqe, addr)"))); if (!(!(sizeof(__u64) != sizeof((((struct io_uring_sqe *)0)->addr))))) __compiletime_assert_1943(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1944(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, splice_off_in) != 16"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, splice_off_in) != 16))) __compiletime_assert_1944(); } while (0); do { extern void __compiletime_assert_1945(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u64) != sizeof_field(struct io_uring_sqe, splice_off_in)"))); if (!(!(sizeof(__u64) != sizeof((((struct io_uring_sqe *)0)->splice_off_in))))) __compiletime_assert_1945(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1946(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, len) != 24"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, len) != 24))) __compiletime_assert_1946(); } while (0); do { extern void __compiletime_assert_1947(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, len)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->len))))) __compiletime_assert_1947(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1948(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, rw_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, rw_flags) != 28))) __compiletime_assert_1948(); } while (0); do { extern void __compiletime_assert_1949(void) __attribute__((__error__("BUILD_BUG_ON failed: 
" "sizeof(__kernel_rwf_t) != sizeof_field(struct io_uring_sqe, rw_flags)"))); if (!(!(sizeof(__kernel_rwf_t) != sizeof((((struct io_uring_sqe *)0)->rw_flags))))) __compiletime_assert_1949(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1950(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, rw_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, rw_flags) != 28))) __compiletime_assert_1950(); } while (0); do { extern void __compiletime_assert_1951(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(int) != sizeof_field(struct io_uring_sqe, rw_flags)"))); if (!(!(sizeof(int) != sizeof((((struct io_uring_sqe *)0)->rw_flags))))) __compiletime_assert_1951(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1952(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, rw_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, rw_flags) != 28))) __compiletime_assert_1952(); } while (0); do { extern void __compiletime_assert_1953(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, rw_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->rw_flags))))) __compiletime_assert_1953(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1954(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, fsync_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, fsync_flags) != 28))) __compiletime_assert_1954(); } while (0); do { extern void __compiletime_assert_1955(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, fsync_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->fsync_flags))))) __compiletime_assert_1955(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1956(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, poll_events) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, poll_events) != 28))) __compiletime_assert_1956(); } while (0); do { extern void __compiletime_assert_1957(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u16) != sizeof_field(struct io_uring_sqe, poll_events)"))); if (!(!(sizeof(__u16) != sizeof((((struct io_uring_sqe *)0)->poll_events))))) __compiletime_assert_1957(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1958(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, poll32_events) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, poll32_events) != 28))) __compiletime_assert_1958(); } while (0); do { extern void __compiletime_assert_1959(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, poll32_events)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->poll32_events))))) __compiletime_assert_1959(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1960(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, sync_range_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, sync_range_flags) != 28))) __compiletime_assert_1960(); } while (0); do { extern void __compiletime_assert_1961(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, sync_range_flags)"))); if (!(!(sizeof(__u32) != 
sizeof((((struct io_uring_sqe *)0)->sync_range_flags))))) __compiletime_assert_1961(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1962(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, msg_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, msg_flags) != 28))) __compiletime_assert_1962(); } while (0); do { extern void __compiletime_assert_1963(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, msg_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->msg_flags))))) __compiletime_assert_1963(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1964(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, timeout_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, timeout_flags) != 28))) __compiletime_assert_1964(); } while (0); do { extern void __compiletime_assert_1965(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, timeout_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->timeout_flags))))) __compiletime_assert_1965(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1966(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, accept_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, accept_flags) != 28))) __compiletime_assert_1966(); } while (0); do { extern void __compiletime_assert_1967(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, accept_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->accept_flags))))) __compiletime_assert_1967(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1968(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, cancel_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, cancel_flags) != 28))) __compiletime_assert_1968(); } while (0); do { extern void __compiletime_assert_1969(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, cancel_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->cancel_flags))))) __compiletime_assert_1969(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1970(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, open_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, open_flags) != 28))) __compiletime_assert_1970(); } while (0); do { extern void __compiletime_assert_1971(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, open_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->open_flags))))) __compiletime_assert_1971(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1972(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, statx_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, statx_flags) != 28))) __compiletime_assert_1972(); } while (0); do { extern void __compiletime_assert_1973(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, statx_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->statx_flags))))) __compiletime_assert_1973(); } while (0); } while (0); 
+ do { do { extern void __compiletime_assert_1974(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, fadvise_advice) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, fadvise_advice) != 28))) __compiletime_assert_1974(); } while (0); do { extern void __compiletime_assert_1975(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, fadvise_advice)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->fadvise_advice))))) __compiletime_assert_1975(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1976(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, splice_flags) != 28"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, splice_flags) != 28))) __compiletime_assert_1976(); } while (0); do { extern void __compiletime_assert_1977(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u32) != sizeof_field(struct io_uring_sqe, splice_flags)"))); if (!(!(sizeof(__u32) != sizeof((((struct io_uring_sqe *)0)->splice_flags))))) __compiletime_assert_1977(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1978(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, user_data) != 32"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, user_data) != 32))) __compiletime_assert_1978(); } while (0); do { extern void __compiletime_assert_1979(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u64) != sizeof_field(struct io_uring_sqe, user_data)"))); if (!(!(sizeof(__u64) != sizeof((((struct io_uring_sqe *)0)->user_data))))) __compiletime_assert_1979(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1980(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, buf_index) != 40"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, buf_index) != 40))) __compiletime_assert_1980(); } while (0); do { extern void __compiletime_assert_1981(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u16) != sizeof_field(struct io_uring_sqe, buf_index)"))); if (!(!(sizeof(__u16) != sizeof((((struct io_uring_sqe *)0)->buf_index))))) __compiletime_assert_1981(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1982(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, personality) != 42"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, personality) != 42))) __compiletime_assert_1982(); } while (0); do { extern void __compiletime_assert_1983(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__u16) != sizeof_field(struct io_uring_sqe, personality)"))); if (!(!(sizeof(__u16) != sizeof((((struct io_uring_sqe *)0)->personality))))) __compiletime_assert_1983(); } while (0); } while (0); + do { do { extern void __compiletime_assert_1984(void) __attribute__((__error__("BUILD_BUG_ON failed: " "offsetof(struct io_uring_sqe, splice_fd_in) != 44"))); if (!(!(__builtin_offsetof(struct io_uring_sqe, splice_fd_in) != 44))) __compiletime_assert_1984(); } while (0); do { extern void __compiletime_assert_1985(void) __attribute__((__error__("BUILD_BUG_ON failed: " "sizeof(__s32) != sizeof_field(struct io_uring_sqe, splice_fd_in)"))); if (!(!(sizeof(__s32) != sizeof((((struct io_uring_sqe *)0)->splice_fd_in))))) __compiletime_assert_1985(); } while (0); } while (0); + + do { extern void __compiletime_assert_1986(void) __attribute__((__error__("BUILD_BUG_ON failed: " 
"ARRAY_SIZE(io_op_defs) != IORING_OP_LAST"))); if (!(!((sizeof(io_op_defs) / sizeof((io_op_defs)[0]) + ((int)(sizeof(struct { int:(-!!(__builtin_types_compatible_p(typeof((io_op_defs)), typeof(&(io_op_defs)[0])))); })))) != IORING_OP_LAST))) __compiletime_assert_1986(); } while (0); + do { extern void __compiletime_assert_1987(void) __attribute__((__error__("BUILD_BUG_ON failed: " "__REQ_F_LAST_BIT >= 8 * sizeof(int)"))); if (!(!(__REQ_F_LAST_BIT >= 8 * sizeof(int)))) __compiletime_assert_1987(); } while (0); + req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), __alignof__(struct io_kiocb), (((slab_flags_t )0x00002000U) | ((slab_flags_t )0x00040000U)), ((void *)0)); + return 0; +}; +static void * __attribute__((__section__(".discard.addressable"))) __attribute__((__used__)) __addressable_io_uring_init8599 = (void *)&io_uring_init; asm(".section \"" ".initcall6" ".init\", \"a\" \n" "__initcall_" "io_uring_init" "6" ": \n" ".long " "io_uring_init" " - . \n" ".previous \n");; diff --git a/test/generics.c b/test/generics.c new file mode 100644 index 0000000..716a866 --- /dev/null +++ b/test/generics.c @@ -0,0 +1,4 @@ +int main(int argc, char *argv[]) { + int x = _Generic('a', char: 1, default: 0); + return x; +} \ No newline at end of file